// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/build_bug.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "ipa.h"
#include "ipa_version.h"
#include "ipa_endpoint.h"
#include "ipa_table.h"
#include "ipa_reg.h"
#include "ipa_mem.h"
#include "ipa_cmd.h"
#include "gsi.h"
#include "gsi_trans.h"

/**
 * DOC: IPA Filter and Route Tables
 *
 * The IPA has tables defined in its local shared memory that define filter
 * and routing rules. Each entry in these tables contains a 64-bit DMA
 * address that refers to DRAM (system memory) containing a rule definition.
 * A rule consists of a contiguous block of 32-bit values terminated with
 * 32 zero bits. A special "zero entry" rule consisting of 64 zero bits
 * represents "no filtering" or "no routing," and is the reset value for
 * filter or route table rules. Separate tables (both filter and route)
 * are used for IPv4 and IPv6. Additionally, there can be hashed filter or
 * route tables, which are used when a hash of message metadata matches.
 * Hashed operation is not supported by all IPA hardware.
 *
 * Each filter rule is associated with an AP or modem TX endpoint, though
 * not all TX endpoints support filtering. The first 64-bit entry in a
 * filter table is a bitmap indicating which endpoints have entries in
 * the table. The low-order bit (bit 0) in this bitmap represents a
 * special global filter, which applies to all traffic. This is not
 * used in the current code. Bit 1, if set, indicates that there is an
 * entry (i.e. a DMA address referring to a rule) for endpoint 0 in the
 * table. Bit 2, if set, indicates there is an entry for endpoint 1,
 * and so on. Space is set aside in IPA local memory to hold as many
 * filter table entries as might be required, but typically they are not
 * all used.
 *
 * The AP initializes all entries in a filter table to refer to a "zero"
 * entry. Once initialized, the modem and AP update the entries for
 * endpoints they "own" directly. Currently the AP does not use the
 * IPA filtering functionality.
 *
 *                        IPA Filter Table
 *                  ----------------------
 * endpoint bitmap | 0x0000000000000048 | Bits 3 and 6 set (endpoints 2 and 5)
 *                 |--------------------|
 * 1st endpoint    | 0x000123456789abc0 | DMA address for modem endpoint 2 rule
 *                 |--------------------|
 * 2nd endpoint    | 0x000123456789abf0 | DMA address for AP endpoint 5 rule
 *                 |--------------------|
 * (unused)        |                    | (Unused space in filter table)
 *                 |--------------------|
 *                          . . .
 *                 |--------------------|
 * (unused)        |                    | (Unused space in filter table)
 *                  ----------------------
 *
 * The set of available route rules is divided about equally between the AP
 * and modem. The AP initializes all entries in a route table to refer to
 * a "zero entry". Once initialized, the modem and AP are responsible for
 * updating their own entries. All entries in a route table are usable,
 * though the AP currently does not use the IPA routing functionality.
 *
 *                         IPA Route Table
 *                  ----------------------
 * 1st modem route | 0x0001234500001100 | DMA address for first route rule
 *                 |--------------------|
 * 2nd modem route | 0x0001234500001140 | DMA address for second route rule
 *                 |--------------------|
 *                          . . .
 *                 |--------------------|
 * Last modem route| 0x0001234500002280 | DMA address for Nth route rule
 *                 |--------------------|
 * 1st AP route    | 0x0001234500001100 | DMA address for route rule (N+1)
 *                 |--------------------|
 * 2nd AP route    | 0x0001234500001140 | DMA address for next route rule
 *                 |--------------------|
 *                          . . .
 *                 |--------------------|
 * Last AP route   | 0x0001234500002280 | DMA address for last route rule
 *                  ----------------------
 */
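
/* For illustration only (an editorial sketch tying the filter table diagram
 * above to ipa_table_init() below): with rules installed for endpoints 2
 * and 5, the AP's "soft" filter map and the hardware bitmap written into
 * the first table slot relate as follows. Bit 0 of the hardware bitmap is
 * the (unused) global filter, so the soft map is shifted left one bit:
 *
 *	u32 filter_map = BIT(2) | BIT(5);			-> 0x24
 *	__le64 bitmap = cpu_to_le64((u64)filter_map << 1);	-> 0x48
 */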

/* IPA hardware constrains filter and route tables alignment */
#define IPA_TABLE_ALIGN		128	/* Minimum table alignment */

/* Assignment of route table entries to the modem and AP */
#define IPA_ROUTE_MODEM_MIN	0
#define IPA_ROUTE_MODEM_COUNT	8

#define IPA_ROUTE_AP_MIN	IPA_ROUTE_MODEM_COUNT
#define IPA_ROUTE_AP_COUNT \
		(IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)

/* Filter or route rules consist of a set of 32-bit values followed by a
 * 32-bit all-zero rule list terminator. The "zero rule" is simply an
 * all-zero rule followed by the list terminator.
 */
#define IPA_ZERO_RULE_SIZE	(2 * sizeof(__le32))

#ifdef IPA_VALIDATE

/* Check things that can be validated at build time. */
static void ipa_table_validate_build(void)
{
        /* IPA hardware accesses memory 128 bytes at a time. Addresses
         * referred to by entries in filter and route tables must be
         * aligned on 128-byte boundaries. The only rule address
         * ever used is the "zero rule", and it's aligned at the base
         * of a coherent DMA allocation.
         */
        BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN);

        /* Filter and route tables contain DMA addresses that refer to
         * filter or route rules. We use a fixed constant to represent
         * the size of either type of table entry. Code in ipa_table_init()
         * uses a pointer to __le64 to initialize table entries.
         */
        BUILD_BUG_ON(IPA_TABLE_ENTRY_SIZE != sizeof(dma_addr_t));
        BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(__le64));

        /* A "zero rule" is used to represent no filtering or no routing.
         * It is a 64-bit block of zeroed memory. Code in ipa_table_init()
         * assumes that it can be written using a pointer to __le64.
         */
        BUILD_BUG_ON(IPA_ZERO_RULE_SIZE != sizeof(__le64));

        /* Impose a practical limit on the number of routes */
        BUILD_BUG_ON(IPA_ROUTE_COUNT_MAX > 32);
        /* The modem must be allotted at least one route table entry */
        BUILD_BUG_ON(!IPA_ROUTE_MODEM_COUNT);
        /* But it can't have more than what is available */
        BUILD_BUG_ON(IPA_ROUTE_MODEM_COUNT > IPA_ROUTE_COUNT_MAX);
}

static bool
ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
{
        struct device *dev = &ipa->pdev->dev;
        const struct ipa_mem *mem;
        u32 size;

        if (route) {
                if (ipv6)
                        mem = hashed ? &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]
                                     : &ipa->mem[IPA_MEM_V6_ROUTE];
                else
                        mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]
                                     : &ipa->mem[IPA_MEM_V4_ROUTE];
                size = IPA_ROUTE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE;
        } else {
                if (ipv6)
                        mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED]
                                     : &ipa->mem[IPA_MEM_V6_FILTER];
                else
                        mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED]
                                     : &ipa->mem[IPA_MEM_V4_FILTER];
                size = (1 + IPA_FILTER_COUNT_MAX) * IPA_TABLE_ENTRY_SIZE;
        }

        if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
                return false;

        /* mem->size >= size is sufficient, but we'll demand more */
        if (mem->size == size)
                return true;

        /* Hashed table regions can be zero size if hashing is not supported */
        if (hashed && !mem->size)
                return true;

        dev_err(dev, "IPv%c %s%s table region size 0x%02x, expected 0x%02x\n",
                ipv6 ? '6' : '4', hashed ? "hashed " : "",
                route ? "route" : "filter", mem->size, size);

        return false;
}

/* Verify the filter and route table memory regions are the expected size */
bool ipa_table_valid(struct ipa *ipa)
{
        bool valid = true;

        valid = valid && ipa_table_valid_one(ipa, false, false, false);
        valid = valid && ipa_table_valid_one(ipa, false, false, true);
        valid = valid && ipa_table_valid_one(ipa, false, true, false);
        valid = valid && ipa_table_valid_one(ipa, false, true, true);
        valid = valid && ipa_table_valid_one(ipa, true, false, false);
        valid = valid && ipa_table_valid_one(ipa, true, false, true);
        valid = valid && ipa_table_valid_one(ipa, true, true, false);
        valid = valid && ipa_table_valid_one(ipa, true, true, true);

        return valid;
}

bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map)
{
        struct device *dev = &ipa->pdev->dev;
        u32 count;

        if (!filter_map) {
                dev_err(dev, "at least one filtering endpoint is required\n");

                return false;
        }

        count = hweight32(filter_map);
        if (count > IPA_FILTER_COUNT_MAX) {
                dev_err(dev, "too many filtering endpoints (%u, max %u)\n",
                        count, IPA_FILTER_COUNT_MAX);

                return false;
        }

        return true;
}

#else /* !IPA_VALIDATE */

static void ipa_table_validate_build(void)
{
}

#endif /* !IPA_VALIDATE */

/* Zero entry count means no table, so just return a 0 address */
static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
{
        u32 skip;

        if (!count)
                return 0;

        /* assert(count <= max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX)); */

        /* Skip over the zero rule and possibly the filter mask */
        skip = filter_mask ? 1 : 2;

        return ipa->table_addr + skip * sizeof(*ipa->table_virt);
}
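
/* For illustration only (an editorial sketch): with the layout built by
 * ipa_table_init() -- [zero rule][filter bitmap][zero rule addresses...] --
 * a filter table source address skips only the zero rule, so the copied
 * data begins with the bitmap slot, while a route table source address
 * also skips the bitmap slot:
 *
 *	ipa_table_addr(ipa, true, count)	-> table_addr + 1 * sizeof(__le64)
 *	ipa_table_addr(ipa, false, count)	-> table_addr + 2 * sizeof(__le64)
 */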

static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
                                u16 first, u16 count, const struct ipa_mem *mem)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        dma_addr_t addr;
        u32 offset;
        u16 size;

        /* Nothing to do if the table memory region is empty */
        if (!mem->size)
                return;

        if (filter)
                first++;	/* skip over bitmap */

        offset = mem->offset + first * IPA_TABLE_ENTRY_SIZE;
        size = count * IPA_TABLE_ENTRY_SIZE;
        addr = ipa_table_addr(ipa, false, count);

        ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
}

/* Reset entries in a single filter table belonging to either the AP or
 * modem to refer to the zero entry. The memory region supplied will be
 * one of the IPv4 or IPv6, non-hashed or hashed, filter table regions.
 */
static int
ipa_filter_reset_table(struct ipa *ipa, const struct ipa_mem *mem, bool modem)
{
        u32 ep_mask = ipa->filter_map;
        u32 count = hweight32(ep_mask);
        struct gsi_trans *trans;
        enum gsi_ee_id ee_id;

        if (!mem->size)
                return 0;

        trans = ipa_cmd_trans_alloc(ipa, count);
        if (!trans) {
                dev_err(&ipa->pdev->dev,
                        "no transaction for %s filter reset\n",
                        modem ? "modem" : "AP");
                return -EBUSY;
        }

        ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
        while (ep_mask) {
                u32 endpoint_id = __ffs(ep_mask);
                struct ipa_endpoint *endpoint;

                ep_mask ^= BIT(endpoint_id);

                endpoint = &ipa->endpoint[endpoint_id];
                if (endpoint->ee_id != ee_id)
                        continue;

                ipa_table_reset_add(trans, true, endpoint_id, 1, mem);
        }

        gsi_trans_commit_wait(trans);

        return 0;
}

/* Theoretically, each filter table could have more filter slots to
 * update than the maximum number of commands in a transaction. So
 * we do each table separately.
 */
static int ipa_filter_reset(struct ipa *ipa, bool modem)
{
        int ret;

        ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER], modem);
        if (ret)
                return ret;

        ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER_HASHED],
                                     modem);
        if (ret)
                return ret;

        ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER], modem);
        if (ret)
                return ret;
        ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER_HASHED],
                                     modem);

        return ret;
}

/* The AP routes and modem routes are each contiguous within the
 * table. We can update each table with a single command, and we
 * won't exceed the per-transaction command limit.
 */
static int ipa_route_reset(struct ipa *ipa, bool modem)
{
        struct gsi_trans *trans;
        u16 first;
        u16 count;

        trans = ipa_cmd_trans_alloc(ipa, 4);
        if (!trans) {
                dev_err(&ipa->pdev->dev,
                        "no transaction for %s route reset\n",
                        modem ? "modem" : "AP");
                return -EBUSY;
        }

        if (modem) {
                first = IPA_ROUTE_MODEM_MIN;
                count = IPA_ROUTE_MODEM_COUNT;
        } else {
                first = IPA_ROUTE_AP_MIN;
                count = IPA_ROUTE_AP_COUNT;
        }

        ipa_table_reset_add(trans, false, first, count,
                            &ipa->mem[IPA_MEM_V4_ROUTE]);
        ipa_table_reset_add(trans, false, first, count,
                            &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);

        ipa_table_reset_add(trans, false, first, count,
                            &ipa->mem[IPA_MEM_V6_ROUTE]);
        ipa_table_reset_add(trans, false, first, count,
                            &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);

        gsi_trans_commit_wait(trans);

        return 0;
}

void ipa_table_reset(struct ipa *ipa, bool modem)
{
        struct device *dev = &ipa->pdev->dev;
        const char *ee_name;
        int ret;

        ee_name = modem ? "modem" : "AP";

        /* Report errors, but reset filter and route tables */
        ret = ipa_filter_reset(ipa, modem);
        if (ret)
                dev_err(dev, "error %d resetting filter table for %s\n",
                        ret, ee_name);

        ret = ipa_route_reset(ipa, modem);
        if (ret)
                dev_err(dev, "error %d resetting route table for %s\n",
                        ret, ee_name);
}

int ipa_table_hash_flush(struct ipa *ipa)
{
        u32 offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
        struct gsi_trans *trans;
        u32 val;

        /* IPA version 4.2 does not support hashed tables */
        if (ipa->version == IPA_VERSION_4_2)
                return 0;

        trans = ipa_cmd_trans_alloc(ipa, 1);
        if (!trans) {
                dev_err(&ipa->pdev->dev, "no transaction for hash flush\n");
                return -EBUSY;
        }

        val = IPV4_FILTER_HASH_FLUSH | IPV6_FILTER_HASH_FLUSH;
        val |= IPV6_ROUTER_HASH_FLUSH | IPV4_ROUTER_HASH_FLUSH;

        ipa_cmd_register_write_add(trans, offset, val, val, false);

        gsi_trans_commit_wait(trans);

        return 0;
}

static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
                               enum ipa_cmd_opcode opcode,
                               const struct ipa_mem *mem,
                               const struct ipa_mem *hash_mem)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        dma_addr_t hash_addr;
        dma_addr_t addr;
        u16 hash_count;
        u16 hash_size;
        u16 count;
        u16 size;

        /* The number of filtering endpoints determines the number of
         * entries in the filter table. The hashed and non-hashed filter
         * tables will have the same number of entries. The size of the
         * route table region determines the number of entries it has.
         */
        if (filter) {
                count = hweight32(ipa->filter_map);
                hash_count = hash_mem->size ? count : 0;
        } else {
                count = mem->size / IPA_TABLE_ENTRY_SIZE;
                hash_count = hash_mem->size / IPA_TABLE_ENTRY_SIZE;
        }
        size = count * IPA_TABLE_ENTRY_SIZE;
        hash_size = hash_count * IPA_TABLE_ENTRY_SIZE;

        addr = ipa_table_addr(ipa, filter, count);
        hash_addr = ipa_table_addr(ipa, filter, hash_count);

        ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
                               hash_size, hash_mem->offset, hash_addr);
}
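
/* Worked example (hypothetical region sizes, for illustration only):
 * with two filtering endpoints, count = hweight32(filter_map) = 2 and
 * size = 2 * IPA_TABLE_ENTRY_SIZE = 16 bytes; for a non-hashed route
 * table region of 120 bytes, count = 120 / IPA_TABLE_ENTRY_SIZE = 15
 * entries and size = 120 bytes.
 */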

int ipa_table_setup(struct ipa *ipa)
{
        struct gsi_trans *trans;

        trans = ipa_cmd_trans_alloc(ipa, 4);
        if (!trans) {
                dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
                return -EBUSY;
        }

        ipa_table_init_add(trans, false, IPA_CMD_IP_V4_ROUTING_INIT,
                           &ipa->mem[IPA_MEM_V4_ROUTE],
                           &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);

        ipa_table_init_add(trans, false, IPA_CMD_IP_V6_ROUTING_INIT,
                           &ipa->mem[IPA_MEM_V6_ROUTE],
                           &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);

        ipa_table_init_add(trans, true, IPA_CMD_IP_V4_FILTER_INIT,
                           &ipa->mem[IPA_MEM_V4_FILTER],
                           &ipa->mem[IPA_MEM_V4_FILTER_HASHED]);

        ipa_table_init_add(trans, true, IPA_CMD_IP_V6_FILTER_INIT,
                           &ipa->mem[IPA_MEM_V6_FILTER],
                           &ipa->mem[IPA_MEM_V6_FILTER_HASHED]);

        gsi_trans_commit_wait(trans);

        return 0;
}

void ipa_table_teardown(struct ipa *ipa)
{
        /* Nothing to do */  /* XXX Maybe reset the tables? */
}

/**
 * ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple
 * @endpoint:	Endpoint whose filter hash tuple should be zeroed
 *
 * Endpoint must be for the AP (not modem) and support filtering. Updates
 * the filter hash values without changing route ones.
 */
static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
{
        u32 endpoint_id = endpoint->endpoint_id;
        u32 offset;
        u32 val;

        offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(endpoint_id);

        val = ioread32(endpoint->ipa->reg_virt + offset);

        /* Zero all filter-related fields, preserving the rest.  Note that
         * u32_replace_bits() returns the updated value without modifying
         * its argument, so the in-place u32p_replace_bits() is used here.
         */
        u32p_replace_bits(&val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_filter_config(struct ipa *ipa, bool modem)
{
        enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
        u32 ep_mask = ipa->filter_map;

        /* IPA version 4.2 does not support hashed tables */
        if (ipa->version == IPA_VERSION_4_2)
                return;

        while (ep_mask) {
                u32 endpoint_id = __ffs(ep_mask);
                struct ipa_endpoint *endpoint;

                ep_mask ^= BIT(endpoint_id);

                endpoint = &ipa->endpoint[endpoint_id];
                if (endpoint->ee_id == ee_id)
                        ipa_filter_tuple_zero(endpoint);
        }
}

static void ipa_filter_deconfig(struct ipa *ipa, bool modem)
{
        /* Nothing to do */
}

static bool ipa_route_id_modem(u32 route_id)
{
        return route_id >= IPA_ROUTE_MODEM_MIN &&
                route_id <= IPA_ROUTE_MODEM_MIN + IPA_ROUTE_MODEM_COUNT - 1;
}
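
/* For illustration only: with IPA_ROUTE_MODEM_MIN of 0 and
 * IPA_ROUTE_MODEM_COUNT of 8 (as defined above), route IDs 0..7 belong
 * to the modem and IDs 8..IPA_ROUTE_COUNT_MAX - 1 belong to the AP:
 *
 *	ipa_route_id_modem(7)	-> true  (last modem route)
 *	ipa_route_id_modem(8)	-> false (first AP route)
 */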

/**
 * ipa_route_tuple_zero() - Zero a hashed route table entry tuple
 * @ipa:	IPA pointer
 * @route_id:	Route table entry whose hash tuple should be zeroed
 *
 * Updates the route hash values without changing filter ones.
 */
static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
{
        u32 offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(route_id);
        u32 val;

        val = ioread32(ipa->reg_virt + offset);

        /* Zero all route-related fields, preserving the rest (using the
         * in-place u32p_replace_bits(), as in ipa_filter_tuple_zero())
         */
        u32p_replace_bits(&val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);

        iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_route_config(struct ipa *ipa, bool modem)
{
        u32 route_id;

        /* IPA version 4.2 has no hashed route tables */
        if (ipa->version == IPA_VERSION_4_2)
                return;

        for (route_id = 0; route_id < IPA_ROUTE_COUNT_MAX; route_id++)
                if (ipa_route_id_modem(route_id) == modem)
                        ipa_route_tuple_zero(ipa, route_id);
}

static void ipa_route_deconfig(struct ipa *ipa, bool modem)
{
        /* Nothing to do */
}

void ipa_table_config(struct ipa *ipa)
{
        ipa_filter_config(ipa, false);
        ipa_filter_config(ipa, true);
        ipa_route_config(ipa, false);
        ipa_route_config(ipa, true);
}

void ipa_table_deconfig(struct ipa *ipa)
{
        ipa_route_deconfig(ipa, true);
        ipa_route_deconfig(ipa, false);
        ipa_filter_deconfig(ipa, true);
        ipa_filter_deconfig(ipa, false);
}

/*
 * Initialize a coherent DMA allocation containing initialized filter and
 * route table data. This is used when initializing or resetting the IPA
 * filter or route table.
 *
 * The first entry in a filter table contains a bitmap indicating which
 * endpoints contain entries in the table. In addition to that first entry,
 * there are at most IPA_FILTER_COUNT_MAX entries that follow. Filter table
 * entries are 64 bits wide, and (other than the bitmap) contain the DMA
 * address of a filter rule. A "zero rule" indicates no filtering, and
 * consists of 64 bits of zeroes. When a filter table is initialized (or
 * reset) its entries are made to refer to the zero rule.
 *
 * Each entry in a route table is the DMA address of a routing rule. For
 * routing there is also a 64-bit "zero rule" that means no routing, and
 * when a route table is initialized or reset, its entries are made to refer
 * to the zero rule. The zero rule is shared for route and filter tables.
 *
 * Note that the IPA hardware requires a filter or route rule address to be
 * aligned on a 128 byte boundary. The coherent DMA buffer we allocate here
 * has a minimum alignment, and we place the zero rule at the base of that
 * allocated space. In ipa_table_init() we verify the minimum DMA allocation
 * meets our requirement.
 *
 *          +-------------------+
 *      --> |     zero rule     |
 *     /    |-------------------|
 *     |    |    filter mask    |
 *     |\   |-------------------|
 *     | ---- zero rule address | \
 *     |\   |-------------------|  |
 *     | ---- zero rule address |  |   IPA_FILTER_COUNT_MAX
 *     |    |-------------------|   >  or IPA_ROUTE_COUNT_MAX,
 *     |           ...          |      whichever is greater
 *      \   |-------------------|  |
 *       ---- zero rule address | /
 *          +-------------------+
 */
int ipa_table_init(struct ipa *ipa)
{
        u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
        struct device *dev = &ipa->pdev->dev;
        dma_addr_t addr;
        __le64 le_addr;
        __le64 *virt;
        size_t size;

        ipa_table_validate_build();

        size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
        virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
        if (!virt)
                return -ENOMEM;

        ipa->table_virt = virt;
        ipa->table_addr = addr;

        /* First slot is the zero rule */
        *virt++ = 0;

        /* Next is the filter table bitmap. The "soft" bitmap value
         * must be converted to the hardware representation by shifting
         * it left one position. (Bit 0 represents global filtering,
         * which is possible but not used.)
         */
        *virt++ = cpu_to_le64((u64)ipa->filter_map << 1);

        /* All the rest contain the DMA address of the zero rule */
        le_addr = cpu_to_le64(addr);
        while (count--)
                *virt++ = le_addr;

        return 0;
}
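
/* Worked example (hypothetical maxima, for illustration only): if
 * IPA_ROUTE_COUNT_MAX were 15 and IPA_FILTER_COUNT_MAX were 14, then
 * count = max(14, 15) = 15 above and the allocation would be
 *
 *	size = IPA_ZERO_RULE_SIZE + (1 + 15) * IPA_TABLE_ENTRY_SIZE
 *	     = 8 + 16 * 8 = 136 bytes
 *
 * one zero rule, one filter bitmap slot, and 15 slots each holding the
 * DMA address of the zero rule.
 */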

void ipa_table_exit(struct ipa *ipa)
{
        u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
        struct device *dev = &ipa->pdev->dev;
        size_t size;

        /* This must match the size computed in ipa_table_init() */
        size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;

        dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr);
        ipa->table_addr = 0;
        ipa->table_virt = NULL;
}