/*
 * Copyright(c) 2016 - 2019 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <rdma/uverbs_ioctl.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

#define RVT_RWQ_COUNT_THRESHOLD 16

static void rvt_rc_timeout(struct timer_list *t);

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 * (The comment on each entry shows the equivalent delay in milliseconds.)
 */
static const u32 ib_rvt_rnr_table[32] = {
	655360, /* 00: 655.36 */
	10,     /* 01: .01 */
	20,     /* 02: .02 */
	30,     /* 03: .03 */
	40,     /* 04: .04 */
	60,     /* 05: .06 */
	80,     /* 06: .08 */
	120,    /* 07: .12 */
	160,    /* 08: .16 */
	240,    /* 09: .24 */
	320,    /* 0A: .32 */
	480,    /* 0B: .48 */
	640,    /* 0C: .64 */
	960,    /* 0D: .96 */
	1280,   /* 0E: 1.28 */
	1920,   /* 0F: 1.92 */
	2560,   /* 10: 2.56 */
	3840,   /* 11: 3.84 */
	5120,   /* 12: 5.12 */
	7680,   /* 13: 7.68 */
	10240,  /* 14: 10.24 */
	15360,  /* 15: 15.36 */
	20480,  /* 16: 20.48 */
	30720,  /* 17: 30.72 */
	40960,  /* 18: 40.96 */
	61440,  /* 19: 61.44 */
	81920,  /* 1A: 81.92 */
	122880, /* 1B: 122.88 */
	163840, /* 1C: 163.84 */
	245760, /* 1D: 245.76 */
	327680, /* 1E: 327.68 */
	491520  /* 1F: 491.52 */
};

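/*
 * Example (illustrative sketch, not driver code): an AETH RNR timeout
 * code of 0x0D converts with
 *
 *	u32 usec = ib_rvt_rnr_table[0x0D & 0x1f];
 *
 * yielding 960 usec (.96 msec). The 5-bit mask here is an assumption
 * based only on the 32-entry table size.
 */
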
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);

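/*
 * Example use of the table (illustrative sketch): a post-send path can
 * gate a work request on the current QP state with
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 *
 * which, per the note above, still admits posts in the SQE and ERR
 * states so that error completions can be generated.
 */
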
/* platform specific: return the last level cache (llc) size, in KiB */
static int rvt_wss_llc_size(void)
{
	/* assume that the boot CPU value is universal for all CPUs */
	return boot_cpu_data.x86_cache_size;
}

/* platform specific: cacheless copy */
static void cacheless_memcpy(void *dst, void *src, size_t n)
{
	/*
	 * Use the only available X64 cacheless copy. Add a __user cast
	 * to quiet sparse. The src argument is already in the kernel so
	 * there are no security issues. The extra fault recovery machinery
	 * is not invoked.
	 */
	__copy_user_nocache(dst, (void __user *)src, n, 0);
}

void rvt_wss_exit(struct rvt_dev_info *rdi)
{
	struct rvt_wss *wss = rdi->wss;

	if (!wss)
		return;

	/* coded to handle partially initialized and repeat callers */
	kfree(wss->entries);
	wss->entries = NULL;
	kfree(rdi->wss);
	rdi->wss = NULL;
}

/**
 * rvt_wss_init - Init wss data structures
 * @rdi: rvt dev struct
 *
 * Return: 0 on success
 */
int rvt_wss_init(struct rvt_dev_info *rdi)
{
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
	unsigned int wss_threshold = rdi->dparms.wss_threshold;
	unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
	long llc_size;
	long llc_bits;
	long table_size;
	long table_bits;
	struct rvt_wss *wss;
	int node = rdi->dparms.node;

	if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
		rdi->wss = NULL;
		return 0;
	}

	rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
	if (!rdi->wss)
		return -ENOMEM;
	wss = rdi->wss;

	/* check for a valid percent range - default to 80 if none or invalid */
	if (wss_threshold < 1 || wss_threshold > 100)
		wss_threshold = 80;

	/* reject a wildly large period */
	if (wss_clean_period > 1000000)
		wss_clean_period = 256;

	/* reject a zero period */
	if (wss_clean_period == 0)
		wss_clean_period = 1;

	/*
	 * Calculate the table size - the next power of 2 larger than the
	 * LLC size. LLC size is in KiB.
	 */
	llc_size = rvt_wss_llc_size() * 1024;
	table_size = roundup_pow_of_two(llc_size);

	/* one bit per page in rounded up table */
	llc_bits = llc_size / PAGE_SIZE;
	table_bits = table_size / PAGE_SIZE;
	wss->pages_mask = table_bits - 1;
	wss->num_entries = table_bits / BITS_PER_LONG;

	wss->threshold = (llc_bits * wss_threshold) / 100;
	if (wss->threshold == 0)
		wss->threshold = 1;

	wss->clean_period = wss_clean_period;
	atomic_set(&wss->clean_counter, wss_clean_period);

	wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
				    GFP_KERNEL, node);
	if (!wss->entries) {
		rvt_wss_exit(rdi);
		return -ENOMEM;
	}

	return 0;
}

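/*
 * Worked example of the sizing above (assumed numbers, 4 KiB pages,
 * 64-bit longs): a 30720 KiB LLC gives llc_size = 31457280 bytes,
 * table_size rounds up to 32 MiB, so table_bits = 8192, pages_mask =
 * 8191, and num_entries = 8192 / 64 = 128 longs of bitmap. llc_bits =
 * 7680, so the default 80% threshold works out to 6144 pages.
 */
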
/*
 * Advance the clean counter. When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking. Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information. Since this is only a heuristic, this is
 * OK. Any inaccuracies will clean themselves out as the counter
 * advances. That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero. When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(struct rvt_wss *wss)
{
	int entry;
	int weight;
	unsigned long bits;

	/* become the cleaner if we decrement the counter to zero */
	if (atomic_dec_and_test(&wss->clean_counter)) {
		/*
		 * Set, not add, the clean period. This avoids an issue
		 * where the counter could decrement below the clean period.
		 * Doing a set can result in lost decrements, slowing the
		 * clean advance. Since this is a heuristic, this possible
		 * slowdown is OK.
		 *
		 * An alternative is to loop, advancing the counter by a
		 * clean period until the result is > 0. However, this could
		 * lead to several threads keeping another in the clean loop.
		 * This could be mitigated by limiting the number of times
		 * we stay in the loop.
		 */
		atomic_set(&wss->clean_counter, wss->clean_period);

		/*
		 * Uniquely grab the entry to clean and move to next.
		 * The current entry is always the lower bits of
		 * wss.clean_entry. The table size, wss.num_entries,
		 * is always a power-of-2.
		 */
		entry = (atomic_inc_return(&wss->clean_entry) - 1)
			& (wss->num_entries - 1);

		/* clear the entry and count the bits */
		bits = xchg(&wss->entries[entry], 0);
		weight = hweight64((u64)bits);
		/* only adjust the contended total count if needed */
		if (weight)
			atomic_sub(weight, &wss->total_count);
	}
}

/*
 * Insert the given address into the working set array.
 */
static void wss_insert(struct rvt_wss *wss, void *address)
{
	u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
	u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
	u32 nr = page & (BITS_PER_LONG - 1);

	if (!test_and_set_bit(nr, &wss->entries[entry]))
		atomic_inc(&wss->total_count);

	wss_advance_clean_counter(wss);
}

/*
 * Is the working set larger than the threshold?
 */
static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
{
	return atomic_read(&wss->total_count) >= wss->threshold;
}

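/*
 * Illustrative sketch of how the helpers above combine under
 * RVT_SGE_COPY_ADAPTIVE (not the actual copy routine): record the
 * destination page, then pick a copy strategy from the estimated
 * working set size:
 *
 *	wss_insert(wss, sge->vaddr);
 *	if (wss_exceeds_threshold(wss))
 *		cacheless_memcpy(sge->vaddr, data, len);
 *	else
 *		memcpy(sge->vaddr, data, len);
 *
 * The idea is that once the destination set no longer fits in the LLC,
 * a cacheless copy avoids evicting useful cache lines.
 */
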
static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt dev struct
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our QPN table. No need for two. Go ahead and mark the bitmaps
	 * for those. The reserved range must be *after* the range which
	 * verbs will pick from.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}

/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset ||
	    !rdi->driver_f.notify_restart_rc)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_array_node(rdi->qp_dev->qp_table_size,
				   sizeof(*rdi->qp_dev->qp_table),
				   GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;
	spinlock_t *ql; /* work around too long line below */

	if (rdi->driver_f.free_all_qps)
		qp_inuse = rdi->driver_f.free_all_qps(rdi);

	qp_inuse += rvt_mcast_tree_empty(rdi);

	if (!rdi->qp_dev)
		return qp_inuse;

	ql = &rdi->qp_dev->qpt_lock;
	spin_lock_irqsave(ql, flags);
	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
					       lockdep_is_held(ql));
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
							  lockdep_is_held(ql)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(ql, flags);
	synchronize_rcu();
	return qp_inuse;
}

/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

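/*
 * For example (assuming 4 KiB pages, so RVT_BITS_PER_PAGE == 32768):
 * bit 5 of map page 2 corresponds to QPN 2 * 32768 + 5 = 65541;
 * mk_qpn() is simply the inverse of the map/offset split done in
 * alloc_qpn() below.
 */
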
/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 * IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= RVT_QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK. It gets re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no set bits in low-order QoS bits */
		WARN_ON(rdi->dparms.qos_shift > 1 &&
			offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

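/*
 * Example of the search stride above (illustrative numbers): with
 * qpn_inc = 1 and qos_shift = 1, qpt->incr is 2, so a scan starting at
 * QPN 7 probes 7, 9, 11, ... and preserves bit 0 of the starting QPN.
 * The WARN_ON() guards the equivalent invariant for larger qos_shift
 * values, where the low-order QoS bits of the offset must stay clear.
 */
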
/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: whether the send side should be cleared as well
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

			rvt_put_qp_swqe(qp, wqe);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			smp_wmb(); /* see qp_set_savail */
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * rvt_swqe_has_lkey - return true if lkey is used by swqe
 * @wqe: the send wqe
 * @lkey: the lkey
 *
 * Test the swqe for using lkey
 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		if (rvt_mr_has_lkey(sge->mr, lkey))
			return true;
	}
	return false;
}

/**
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
 * @qp: the rvt_qp
 * @lkey: the lkey
 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	u32 s_last = qp->s_last;

	while (s_last != qp->s_head) {
		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);

		if (rvt_swqe_has_lkey(wqe, lkey))
			return true;

		if (++s_last >= qp->s_size)
			s_last = 0;
	}
	if (qp->s_rdma_mr)
		if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
			return true;
	return false;
}

/**
 * rvt_qp_acks_has_lkey - return true if acks have lkey
 * @qp: the qp
 * @lkey: the lkey
 */
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	int i;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[i];

		if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
			return true;
	}
	return false;
}

/*
 * rvt_qp_mr_clean - clean up remote ops for lkey
 * @qp: the qp
 * @lkey: the lkey that is being de-registered
 *
 * This routine checks if the lkey is being used by
 * the qp.
 *
 * If so, the qp is put into an error state to eliminate
 * any references from the qp.
 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
{
	bool lastwqe = false;

	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		/* avoid special QPs */
		return;
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto check_lwqe;

	if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
	    rvt_qp_sends_has_lkey(qp, lkey) ||
	    rvt_qp_acks_has_lkey(qp, lkey))
		lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
check_lwqe:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				     rcu_dereference_protected(qp->next,
				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		rvt_put_qp(qp);
	}
}

/**
 * rvt_alloc_rq - allocate memory for user or kernel buffer
 * @rq: receive queue data structure
 * @size: size of the receive queue buffer, in bytes
 * @node: The NUMA node
 * @udata: non-NULL when the queue is for a user context
 *
 * This function is used by both shared receive
 * queues and non-shared receive queues to allocate
 * memory.
 *
 * Return: 0 on success, -ENOMEM if memory allocation failed.
 */
int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
		 struct ib_udata *udata)
{
	if (udata) {
		rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
		if (!rq->wq)
			goto bail;
		/* need kwq with no buffers */
		rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
		if (!rq->kwq)
			goto bail;
		rq->kwq->curr_wq = rq->wq->wq;
	} else {
		/* need kwq with buffers */
		rq->kwq =
			vzalloc_node(sizeof(struct rvt_krwq) + size, node);
		if (!rq->kwq)
			goto bail;
		rq->kwq->curr_wq = rq->kwq->wq;
	}

	spin_lock_init(&rq->kwq->p_lock);
	spin_lock_init(&rq->kwq->c_lock);
	return 0;
bail:
	rvt_free_rq(rq);
	return -ENOMEM;
}

/**
 * rvt_init_qp - initialize the QP state to the reset state
 * @rdi: rvt dev struct
 * @qp: the QP to init or reinit
 * @type: the QP type
 *
 * This function is called from both rvt_create_qp() and
 * rvt_reset_qp(). The difference is that the reset path takes
 * the necessary locks to protect against concurrent access.
 */
static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_acked_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.kwq)
		qp->r_rq.kwq->count = qp->r_rq.size;
	qp->r_sge.num_sge = 0;
	atomic_set(&qp->s_reserved_used, 0);
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: rvt dev struct
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type)
	__must_hold(&qp->s_lock)
	__must_hold(&qp->s_hlock)
	__must_hold(&qp->r_lock)
{
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		rvt_stop_rc_timers(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock(&qp->s_hlock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);
		rvt_del_timers_sync(qp);
		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_hlock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
		/*
		 * Let the driver do any tear down or re-init it needs to for
		 * a qp that has been reset
		 */
		rdi->driver_f.notify_qp_reset(qp);
	}
	rvt_init_qp(rdi, qp, type);
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);
}

/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * get_allowed_ops - Given a QP type return the appropriate allowed OP
 * @type: valid, supported, QP type
 */
static u8 get_allowed_ops(enum ib_qp_type type)
{
	return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ?
		IB_OPCODE_UC : IB_OPCODE_UD;
}

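/*
 * For example, get_allowed_ops(IB_QPT_RC) returns IB_OPCODE_RC, the
 * base of the RC opcode range in <rdma/ib_pack.h> (0x00; UC is 0x20,
 * UD is 0x60). Comparing a packet opcode's high bits against
 * qp->allowed_ops is then a cheap transport-type check.
 */
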
/**
 * free_ud_wq_attr - Clean up AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static void free_ud_wq_attr(struct rvt_qp *qp)
{
	struct rvt_swqe *wqe;
	int i;

	for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
		wqe = rvt_get_swqe_ptr(qp, i);
		kfree(wqe->ud_wr.attr);
		wqe->ud_wr.attr = NULL;
	}
}

/**
 * alloc_ud_wq_attr - AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 * @node: Numa node for allocation
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
{
	struct rvt_swqe *wqe;
	int i;

	for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
		wqe = rvt_get_swqe_ptr(qp, i);
		wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
					       GFP_KERNEL, node);
		if (!wqe->ud_wr.attr) {
			free_ud_wq_attr(qp);
			return -ENOMEM;
		}
	}

	return 0;
}

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a reserved
 * range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	size_t sqsize;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge >
		    rdi->dparms.props.max_recv_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}
	sqsize =
		init_attr->cap.max_send_wr + 1 +
		rdi->dparms.reserved_operations;
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
		/* fall through */
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
		swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
				  rdi->dparms.node);
		if (!qp)
			goto bail_swq;
		qp->allowed_ops = get_allowed_ops(init_attr->qp_type);

		RCU_INIT_POINTER(qp->next, NULL);
		if (init_attr->qp_type == IB_QPT_RC) {
			qp->s_ack_queue =
				kcalloc_node(rvt_max_atomic(rdi),
					     sizeof(*qp->s_ack_queue),
					     GFP_KERNEL,
					     rdi->dparms.node);
			if (!qp->s_ack_queue)
				goto bail_qp;
		}
		/* initialize timers needed for rc qp */
		timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
		hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		qp->s_rnr_timer.function = rvt_rc_rnr_retry;

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
		if (IS_ERR(priv)) {
			ret = priv;
			goto bail_qp;
		}
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
					   rdi->dparms.node, udata);
			if (err) {
				ret = ERR_PTR(err);
				goto bail_driver_priv;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_hlock);
		spin_lock_init(&qp->s_lock);
		atomic_set(&qp->refcount, 0);
		atomic_set(&qp->local_ops_pending, 0);
		init_waitqueue_head(&qp->wait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = sqsize;
		qp->s_avail = init_attr->cap.max_send_wr;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;
		err = alloc_ud_wq_attr(qp, rdi->dparms.node);
		if (err) {
			ret = ERR_PTR(err);
			/* free r_rq too; bailing to driver_priv leaked it */
			goto bail_rq_wq;
		}
Dennis Dalessandro | 515667f | 2016-01-22 12:50:17 -0800 | [diff] [blame] | 1177 | |
| 1178 | err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table, |
| 1179 | init_attr->qp_type, |
Leon Romanovsky | 0f4d027 | 2017-05-23 14:38:14 +0300 | [diff] [blame] | 1180 | init_attr->port_num); |
Dennis Dalessandro | 515667f | 2016-01-22 12:50:17 -0800 | [diff] [blame] | 1181 | if (err < 0) { |
| 1182 | ret = ERR_PTR(err); |
| 1183 | goto bail_rq_wq; |
| 1184 | } |
| 1185 | qp->ibqp.qp_num = err; |
| 1186 | qp->port_num = init_attr->port_num; |
Mike Marciniszyn | 222f7a9a | 2016-09-06 04:37:26 -0700 | [diff] [blame] | 1187 | rvt_init_qp(rdi, qp, init_attr->qp_type); |
Mike Marciniszyn | 5190f05 | 2018-11-28 10:22:31 -0800 | [diff] [blame] | 1188 | if (rdi->driver_f.qp_priv_init) { |
| 1189 | err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr); |
| 1190 | if (err) { |
| 1191 | ret = ERR_PTR(err); |
| 1192 | goto bail_rq_wq; |
| 1193 | } |
| 1194 | } |
Dennis Dalessandro | 515667f | 2016-01-22 12:50:17 -0800 | [diff] [blame] | 1195 | break; |
| 1196 | |
| 1197 | default: |
| 1198 | /* Don't support raw QPs */ |
| 1199 | return ERR_PTR(-EINVAL); |
| 1200 | } |

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s, udata,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
		qp->pid = current->pid;
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	/*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer. This busy_jiffies
	 * value is scaled by the number of RC QPs created for the device to
	 * reduce the number of timeouts occurring when there is a large
	 * number of QPs. busy_jiffies is incremented every RC QP scaling
	 * interval. The scaling interval is selected based on extensive
	 * performance evaluation of targeted workloads.
	 */
	if (init_attr->qp_type == IB_QPT_RC) {
		rdi->n_rc_qps++;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
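	/*
	 * Worked example (illustrative, not from the original source): if
	 * RC_QP_SCALING_INTERVAL were 16, then after creating 48 RC QPs
	 * busy_jiffies = 48 / 16 = 3, so every retry timeout on this
	 * device is padded by 3 jiffies until enough RC QPs are destroyed.
	 */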
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	return ret;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	rvt_free_rq(&qp->r_rq);
	free_ud_wq_attr(qp);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp->s_ack_queue);
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}
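/*
 * Userspace sketch (illustrative, not part of this file): the offset
 * copied to udata above is consumed by a provider library roughly as
 * follows, where "fd" and "size" are assumed to be the uverbs device
 * file descriptor and the RWQ allocation size:
 *
 *	struct rvt_rwq *wq = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *				  MAP_SHARED, fd, offset);
 *
 * rvt_mmap() matches the offset against the pending_mmaps list populated
 * above and maps the kernel RWQ buffer into the caller's address space.
 */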

/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in the error state, just return.
 *
 * Return: true if a last WQE event should be generated.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct ib_wc wc;
	int ret = 0;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (READ_ONCE(qp->s_last) != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;
	if (qp->r_rq.kwq) {
		u32 head;
		u32 tail;
		struct rvt_rwq *wq = NULL;
		struct rvt_krwq *kwq = NULL;

		spin_lock(&qp->r_rq.kwq->c_lock);
		/* qp->ip indicates a user-mapped RWQ buffer */
		if (qp->ip) {
			wq = qp->r_rq.wq;
			head = RDMA_READ_UAPI_ATOMIC(wq->head);
			tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
		} else {
			kwq = qp->r_rq.kwq;
			head = kwq->head;
			tail = kwq->tail;
		}
		/* sanity check pointers before trusting them */
		if (head >= qp->r_rq.size)
			head = 0;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		if (qp->ip)
			RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
		else
			kwq->tail = tail;
		spin_unlock(&qp->r_rq.kwq->c_lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);
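/*
 * Usage sketch (illustrative, not from the original source): a driver
 * that detects a fatal QP condition can move the QP to the error state
 * like this; rvt_error_qp() requires both r_lock and s_lock to be held
 * with interrupts disabled, and the caller raises the last WQE event.
 * The pattern mirrors what rvt_modify_qp() does below:
 *
 *	spin_lock_irqsave(&qp->r_lock, flags);
 *	spin_lock(&qp->s_lock);
 *	lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
 *	spin_unlock(&qp->s_lock);
 *	spin_unlock_irqrestore(&qp->r_lock, flags);
 *	if (lastwqe && qp->ibqp.event_handler) {
 *		ev.device = qp->ibqp.device;
 *		ev.element.qp = &qp->ibqp;
 *		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
 *		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
 *	}
 */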

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	rvt_get_qp(qp);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}
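/*
 * Lookup sketch (illustrative): the receive path finds a QP by hashing
 * the QPN with the same hash_32() parameters and walking the
 * RCU-protected chain built above (cf. rvt_lookup_qpn()), roughly:
 *
 *	rcu_read_lock();
 *	for (qp = rcu_dereference(rdi->qp_dev->qp_table[
 *			hash_32(qpn, rdi->qp_dev->qp_table_bits)]);
 *	     qp; qp = rcu_dereference(qp->next))
 *		if (qp->ibqp.qp_num == qpn)
 *			break;
 *	rcu_read_unlock();
 */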

/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	int opa_ah;

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
	opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
			    opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
			    be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
			    opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
			    be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values. It is OK to set the mtu
	 * greater than the active mtu (or even the max_cap, if we have
	 * tuned that to a small mtu). We'll set qp->path_mtu to the
	 * lesser of the requested attribute mtu and the active mtu, for
	 * packetizing messages. Note that the QP port has to be set in
	 * INIT and the MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
		qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->log_pmtu = ilog2(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	if (rdi->driver_f.modify_qp)
		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}
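/*
 * Usage sketch (illustrative, not from the original source): a ULP brings
 * a QP to a usable state through the standard verbs transitions, which
 * land here via ib_modify_qp(); the attribute values are placeholders:
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_INIT,
 *				   .pkey_index = 0, .port_num = 1,
 *				   .qp_access_flags = 0 };
 *	err = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				      IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *
 * followed by a transition to IB_QPS_RTR (setting path MTU, dest QPN,
 * RQ PSN, and the AV) and then to IB_QPS_RTS (setting SQ PSN, timeout,
 * and the retry counts).
 */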

/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 * @udata: unused by rdmavt
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	rvt_reset_qp(rdi, qp, ibqp->qp_type);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	wait_event(qp->wait, !atomic_read(&qp->refcount));
	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		rdi->n_rc_qps--;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	kvfree(qp->r_rq.kwq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp->s_ack_queue);
	rdma_destroy_ah_attr(&qp->remote_ah_attr);
	rdma_destroy_ah_attr(&qp->alt_ah_attr);
	free_ud_wq_attr(qp);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}

/**
 * rvt_query_qp - query a QP
 * @ibqp: the IB QP to query
 * @attr: the attr struct to fill in
 * @attr_mask: the attr mask (ignored)
 * @init_attr: the init attr struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1 -
		rdi->dparms.reserved_operations;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num =
		rdma_ah_get_port_num(&qp->alt_ah_attr);
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success, otherwise errno
 */
int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		  const struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_krwq *wq = qp->r_rq.kwq;
	unsigned long flags;
	int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
			   !qp->ibqp.srq;

	/* Check that state is OK to post receive. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		return -EINVAL;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == READ_ONCE(wq->tail)) {
			spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}
		if (unlikely(qp_err_flush)) {
			struct ib_wc wc;

			memset(&wc, 0, sizeof(wc));
			wc.qp = &qp->ibqp;
			wc.opcode = IB_WC_RECV;
			wc.wr_id = wr->wr_id;
			wc.status = IB_WC_WR_FLUSH_ERR;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		} else {
			wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
			wqe->wr_id = wr->wr_id;
			wqe->num_sge = wr->num_sge;
			for (i = 0; i < wr->num_sge; i++)
				wqe->sg_list[i] = wr->sg_list[i];
			/*
			 * Make sure queue entry is written
			 * before the head index.
			 */
			smp_store_release(&wq->head, next);
		}
		spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
	}
	return 0;
}
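/*
 * Usage sketch (illustrative, not from the original source): a ULP posts
 * a receive buffer through the verbs layer, which dispatches here;
 * "buf", "len" and "lkey" are placeholders for a registered MR:
 *
 *	struct ib_sge sge = { .addr = (u64)buf, .length = len,
 *			      .lkey = lkey };
 *	struct ib_recv_wr wr = { .wr_id = 1, .sg_list = &sge,
 *				 .num_sge = 1 };
 *	const struct ib_recv_wr *bad_wr;
 *	ret = ib_post_recv(qp, &wr, &bad_wr);
 */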

/**
 * rvt_qp_valid_operation - validate post send wr request
 * @qp: the qp
 * @post_parms: the post send table for the driver
 * @wr: the work request
 *
 * The routine validates the operation based on the
 * validation table and returns the length of the operation
 * which can extend beyond the ib_send_wr. Operation
 * dependent flags key atomic operation validation.
 *
 * There is an exception for UD qps that validates the pd and
 * overrides the length to include the additional UD specific
 * length.
 *
 * Return: a negative error or the length of the work request
 * for building the swqe.
 */
static inline int rvt_qp_valid_operation(
	struct rvt_qp *qp,
	const struct rvt_operation_params *post_parms,
	const struct ib_send_wr *wr)
{
	int len;

	if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
		return -EINVAL;
	if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
		return -EINVAL;
	if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
	    ibpd_to_rvtpd(qp->ibqp.pd)->user)
		return -EINVAL;
	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
	    (wr->num_sge == 0 ||
	     wr->sg_list[0].length < sizeof(u64) ||
	     wr->sg_list[0].addr & (sizeof(u64) - 1)))
		return -EINVAL;
	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
	    !qp->s_max_rd_atomic)
		return -EINVAL;
	len = post_parms[wr->opcode].length;
	/* UD specific */
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC) {
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
		len = sizeof(struct ib_ud_wr);
	}
	return len;
}
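/*
 * Illustrative sketch (not from this file): a driver's post_parms table
 * maps each opcode to its WR length and the QP types that support it.
 * An RDMA write entry, for example, might look like:
 *
 *	[IB_WR_RDMA_WRITE] = {
 *		.length = sizeof(struct ib_rdma_wr),
 *		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *	},
 */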

/**
 * rvt_qp_is_avail - determine queue capacity
 * @qp: the qp
 * @rdi: the rdmavt device
 * @reserved_op: is this a reserved operation
 *
 * This assumes the s_hlock is held but the s_last
 * qp variable is uncontrolled.
 *
 * For non reserved operations, the qp->s_avail
 * may be changed.
 *
 * Return: 0 on success, otherwise -ENOMEM.
 */
static inline int rvt_qp_is_avail(
	struct rvt_qp *qp,
	struct rvt_dev_info *rdi,
	bool reserved_op)
{
	u32 slast;
	u32 avail;
	u32 reserved_used;

	/* see rvt_qp_wqe_unreserve() */
	smp_mb__before_atomic();
	if (unlikely(reserved_op)) {
		/* see rvt_qp_wqe_unreserve() */
		reserved_used = atomic_read(&qp->s_reserved_used);
		if (reserved_used >= rdi->dparms.reserved_operations)
			return -ENOMEM;
		return 0;
	}
	/* non-reserved operations */
	if (likely(qp->s_avail))
		return 0;
	/* See rvt_qp_complete_swqe() */
	slast = smp_load_acquire(&qp->s_last);
	if (qp->s_head >= slast)
		avail = qp->s_size - (qp->s_head - slast);
	else
		avail = slast - qp->s_head;

	reserved_used = atomic_read(&qp->s_reserved_used);
	avail = avail - 1 -
		(rdi->dparms.reserved_operations - reserved_used);
	/* ensure we don't assign a negative s_avail */
	if ((s32)avail <= 0)
		return -ENOMEM;
	qp->s_avail = avail;
	if (WARN_ON(qp->s_avail >
		    (qp->s_size - 1 - rdi->dparms.reserved_operations)))
		rvt_pr_err(rdi,
			   "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
			   qp->ibqp.qp_num, qp->s_size, qp->s_avail,
			   qp->s_head, qp->s_tail, qp->s_cur,
			   qp->s_acked, qp->s_last);
	return 0;
}
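/*
 * Worked example (illustrative): with s_size = 32, s_head = 10,
 * s_last = 5, and reserved_operations = 2 with none in use, the ring
 * holds 32 - (10 - 5) = 27 free slots; one slot is sacrificed to
 * distinguish full from empty and 2 are held back for reserved
 * operations, so s_avail becomes 27 - 1 - 2 = 24.
 */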

/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 * @call_send: set when the send engine should be kicked immediately;
 * the driver's setup_wqe() callback may update it
 */
static int rvt_post_one_wr(struct rvt_qp *qp,
			   const struct ib_send_wr *wr,
			   bool *call_send)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	u8 log_pmtu;
	int ret;
	size_t cplen;
	bool reserved_op;
	int local_ops_delayed = 0;

	BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
	if (ret < 0)
		return ret;
	cplen = ret;

	/*
	 * Local operations include fast register and local invalidate.
	 * Fast register needs to be processed immediately because the
	 * registered lkey may be used by following work requests and the
	 * lkey needs to be valid at the time those requests are posted.
	 * Local invalidate can be processed immediately if fencing is
	 * not required and no previous local invalidate ops are pending.
	 * Signaled local operations that have been processed immediately
	 * need to have requests with "completion only" flags set posted
	 * to the send queue in order to generate completions.
	 */
	if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
		switch (wr->opcode) {
		case IB_WR_REG_MR:
			ret = rvt_fast_reg_mr(qp,
					      reg_wr(wr)->mr,
					      reg_wr(wr)->key,
					      reg_wr(wr)->access);
			if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
				return ret;
			break;
		case IB_WR_LOCAL_INV:
			if ((wr->send_flags & IB_SEND_FENCE) ||
			    atomic_read(&qp->local_ops_pending)) {
				local_ops_delayed = 1;
			} else {
				ret = rvt_invalidate_rkey(
					qp, wr->ex.invalidate_rkey);
				if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
					return ret;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	reserved_op = rdi->post_parms[wr->opcode].flags &
			RVT_OPERATION_USE_RESERVE;
	/* check for avail */
	ret = rvt_qp_is_avail(qp, rdi, reserved_op);
	if (ret)
		return ret;
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	/* cplen has length from above */
	memcpy(&wqe->wr, wr, cplen);

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		struct rvt_sge *last_sge = NULL;

		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;

			if (length == 0)
				continue;
			ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
					  &wr->sg_list[i], acc);
			if (unlikely(ret < 0))
				goto bail_inval_free;
			wqe->length += length;
			if (ret)
				last_sge = &wqe->sg_list[j];
			j += ret;
		}
		wqe->wr.num_sge = j;
	}

	/*
	 * Calculate and set SWQE PSN values prior to handing it off
	 * to the driver's check routine. This gives the driver the
	 * opportunity to adjust PSN values based on internal checks.
	 */
	log_pmtu = qp->log_pmtu;
	if (qp->allowed_ops == IB_OPCODE_UD) {
		struct rvt_ah *ah = rvt_get_swqe_ah(wqe);

		log_pmtu = ah->log_pmtu;
		rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr);
	}

	if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
		if (local_ops_delayed)
			atomic_inc(&qp->local_ops_pending);
		else
			wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
		wqe->ssn = 0;
		wqe->psn = 0;
		wqe->lpsn = 0;
	} else {
		wqe->ssn = qp->s_ssn++;
		wqe->psn = qp->s_next_psn;
		wqe->lpsn = wqe->psn +
				(wqe->length ?
					((wqe->length - 1) >> log_pmtu) :
					0);
	}
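	/*
	 * Worked example (illustrative): a 10000-byte RC send with a
	 * 4096-byte path MTU (log_pmtu = 12) spans (10000 - 1) >> 12 = 2
	 * additional packets, so lpsn = psn + 2 and the request occupies
	 * PSNs psn through psn + 2.
	 */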

	/* general part of wqe valid - allow for driver checks */
	if (rdi->driver_f.setup_wqe) {
		ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
		if (ret < 0)
			goto bail_inval_free_ref;
	}

	if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
		qp->s_next_psn = wqe->lpsn + 1;

	if (unlikely(reserved_op)) {
		wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
		rvt_qp_wqe_reserve(qp, wqe);
	} else {
		wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
		qp->s_avail--;
	}
	trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
	smp_wmb(); /* see request builders */
	qp->s_head = next;

	return 0;

bail_inval_free_ref:
	if (qp->allowed_ops == IB_OPCODE_UD)
		rdma_destroy_ah_attr(wqe->ud_wr.attr);
bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return ret;
}

/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		  const struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	unsigned long flags = 0;
	bool call_send;
	unsigned nreq = 0;
	int err = 0;

	spin_lock_irqsave(&qp->s_hlock, flags);

	/*
	 * Ensure QP state is such that we can send. If not, bail out early;
	 * there is no need to do this every time we post a send.
	 */
	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_hlock, flags);
		return -EINVAL;
	}

	/*
	 * If the send queue is empty and we only have a single WR, then
	 * just go ahead and kick the send engine into gear. Otherwise we
	 * will always just schedule the send to happen later.
	 */
	call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr, &call_send);
		if (unlikely(err)) {
			*bad_wr = wr;
			goto bail;
		}
		nreq++;
	}
bail:
	spin_unlock_irqrestore(&qp->s_hlock, flags);
	if (nreq) {
		/*
		 * Only call do_send if there is exactly one packet, and the
		 * driver said it was ok.
		 */
		if (nreq == 1 && call_send)
			rdi->driver_f.do_send(qp);
		else
			rdi->driver_f.schedule_send_no_lock(qp);
	}
	return err;
}
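/*
 * Usage sketch (illustrative, not from the original source): posting a
 * single signaled send through the verbs layer; "buf", "len" and "lkey"
 * are placeholders for a registered MR:
 *
 *	struct ib_sge sge = { .addr = (u64)buf, .length = len,
 *			      .lkey = lkey };
 *	struct ib_send_wr wr = { .wr_id = 2, .sg_list = &sge, .num_sge = 1,
 *				 .opcode = IB_WR_SEND,
 *				 .send_flags = IB_SEND_SIGNALED };
 *	const struct ib_send_wr *bad_wr;
 *	err = ib_post_send(qp, &wr, &bad_wr);
 */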

/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_krwq *wq;
	unsigned long flags;

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.kwq->p_lock, flags);
		wq = srq->rq.kwq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == READ_ONCE(wq->tail)) {
			spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_store_release(&wq->head, next);
		spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
	}
	return 0;
}

/*
 * Validate an RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				  NULL, &wqe->sg_list[i],
				  IB_ACCESS_LOCAL_WRITE);
		if (unlikely(ret <= 0))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	return 1;

bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	return 0;
}
| 2312 | |
| 2313 | /** |
Kamenee Arumugam | f592ae3 | 2019-06-28 14:04:30 -0400 | [diff] [blame] | 2314 | * get_count - count numbers of request work queue entries |
| 2315 | * in circular buffer |
| 2316 | * @rq: data structure for request queue entry |
| 2317 | * @tail: tail indices of the circular buffer |
| 2318 | * @head: head indices of the circular buffer |
| 2319 | * |
| 2320 | * Return - total number of entries in the circular buffer |
| 2321 | */ |
| 2322 | static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head) |
| 2323 | { |
| 2324 | u32 count; |
| 2325 | |
| 2326 | count = head; |
| 2327 | |
| 2328 | if (count >= rq->size) |
| 2329 | count = 0; |
| 2330 | if (count < tail) |
| 2331 | count += rq->size - tail; |
| 2332 | else |
| 2333 | count -= tail; |
| 2334 | |
| 2335 | return count; |
| 2336 | } |
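
/*
 * Worked example for get_count() (illustrative): with rq->size = 8,
 * tail = 6 and head = 2, the occupied slots are 6, 7, 0, 1, so
 * count = head + rq->size - tail = 2 + 8 - 6 = 4.  Without a wrap,
 * e.g. tail = 2 and head = 6, count is simply head - tail = 4.  A
 * head of rq->size or more is clamped to 0 before the arithmetic.
 */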

/**
 * get_rvt_head - get the head index of the circular buffer
 * @rq: the receive queue
 * @ip: the mmap info pointer; non-NULL for a user-mapped queue
 *
 * Return: head index value
 */
static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
{
	u32 head;

	if (ip)
		head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
	else
		head = rq->kwq->head;

	return head;
}

/**
 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return: -1 if there is a local error, 0 if no RWQE is available,
 * otherwise 1.
 *
 * Can be called from interrupt level.
 */
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_krwq *kwq = NULL;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	u32 head;
	int ret;
	void *ip = NULL;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
		ip = srq->ip;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
		ip = qp->ip;
	}

	spin_lock_irqsave(&rq->kwq->c_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}
	kwq = rq->kwq;
	if (ip) {
		wq = rq->wq;
		tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
	} else {
		tail = kwq->tail;
	}

	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;

	if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
		head = get_rvt_head(rq, ip);
		kwq->count = get_count(rq, tail, head);
	}
	if (unlikely(kwq->count == 0)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after the count is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	if (ip)
		RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
	else
		kwq->tail = tail;
	if (!wr_id_only && !init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	kwq->count--;
	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		if (kwq->count < srq->limit) {
			kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
			if (kwq->count < srq->limit) {
				struct ib_event ev;

				srq->limit = 0;
				spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
				ev.device = qp->ibqp.device;
				ev.element.srq = qp->ibqp.srq;
				ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
				handler(&ev, srq->ibsrq.srq_context);
				goto bail;
			}
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
bail:
	return ret;
}
EXPORT_SYMBOL(rvt_get_rwqe);
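
/*
 * Illustrative sketch (not part of rdmavt): the shape of a driver
 * receive path built on rvt_get_rwqe().  example_deliver() is a
 * hypothetical per-packet handler; rvt_copy_sge() and rvt_rc_error()
 * are the real helpers defined in this file.
 */
static void example_deliver(struct rvt_qp *qp, void *data, u32 len)
{
	int ret = rvt_get_rwqe(qp, false);

	if (ret < 0) {
		/* A local error was flagged while claiming the RWQE. */
		rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
		return;
	}
	if (!ret)
		return;	/* no RWQE posted; an RC responder would RNR NAK */
	/* qp->r_sge now describes the claimed buffer; copy into it. */
	rvt_copy_sge(qp, &qp->r_sge, data, len, true, false);
}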

/**
 * rvt_comm_est - handle trap with QP established
 * @qp: the QP
 */
void rvt_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_comm_est);

void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = rvt_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_rc_error);

/*
 * rvt_rnr_tbl_to_usec - return usec from an index into ib_rvt_rnr_table
 * @index: the index
 */
unsigned long rvt_rnr_tbl_to_usec(u32 index)
{
	return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
}
EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);

static inline unsigned long rvt_aeth_to_usec(u32 aeth)
{
	return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
				IB_AETH_CREDIT_MASK];
}
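
/*
 * Worked example (illustrative): the credit field of the AETH holds
 * the IBTA RNR NAK timer code, so for a code of 12 (0.64 msec in the
 * IBTA encoding) rvt_aeth_to_usec() returns ib_rvt_rnr_table[12],
 * i.e. 640 usec, and rvt_add_rnr_timer() below arms the hrtimer for
 * 640000 ns.
 */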

/*
 * rvt_add_retry_timer_ext - add/start a retry timer
 * @qp: the QP
 * @shift: timeout shift to wait for multiple packets
 *
 * Add a retry timer on the QP.
 */
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + rdi->busy_jiffies +
			      (qp->timeout_jiffies << shift);
	add_timer(&qp->s_timer);
}
EXPORT_SYMBOL(rvt_add_retry_timer_ext);
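
/*
 * Worked example (illustrative): with qp->timeout == 14 the base
 * retry timeout is 4.096 usec * (1 << 14), roughly 67 msec.  A
 * nonzero @shift scales qp->timeout_jiffies by (1 << shift) so a
 * single timer can cover multiple expected response packets.
 */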

/**
 * rvt_add_rnr_timer - add/start an rnr timer
 * @qp: the QP
 * @aeth: aeth of RNR timeout, simulated aeth for loopback
 *
 * Add an rnr timer on the QP.
 */
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
{
	u32 to;

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_WAIT_RNR;
	to = rvt_aeth_to_usec(aeth);
	trace_rvt_rnrnak_add(qp, to);
	hrtimer_start(&qp->s_rnr_timer,
		      ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
}
EXPORT_SYMBOL(rvt_add_rnr_timer);

/**
 * rvt_stop_rc_timers - stop all timers
 * @qp: the QP
 *
 * Stop any pending timers.
 */
void rvt_stop_rc_timers(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		hrtimer_try_to_cancel(&qp->s_rnr_timer);
	}
}
EXPORT_SYMBOL(rvt_stop_rc_timers);

/**
 * rvt_stop_rnr_timer - stop an rnr timer
 * @qp: the QP
 *
 * Stop an rnr timer if it is pending.
 */
static void rvt_stop_rnr_timer(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		trace_rvt_rnrnak_stop(qp, 0);
	}
}

/**
 * rvt_del_timers_sync - wait for any timeout routines to exit
 * @qp: the QP
 */
void rvt_del_timers_sync(struct rvt_qp *qp)
{
	del_timer_sync(&qp->s_timer);
	hrtimer_cancel(&qp->s_rnr_timer);
}
EXPORT_SYMBOL(rvt_del_timers_sync);

/*
 * This is called from s_timer for missing responses.
 */
static void rvt_rc_timeout(struct timer_list *t)
{
	struct rvt_qp *qp = from_timer(qp, t, s_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];

		qp->s_flags &= ~RVT_S_TIMER;
		rvp->n_rc_timeouts++;
		del_timer(&qp->s_timer);
		trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
		if (rdi->driver_f.notify_restart_rc)
			rdi->driver_f.notify_restart_rc(qp,
							qp->s_last_psn + 1,
							1);
		rdi->driver_f.schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}

/*
 * This is called from s_timer for RNR timeouts.
 */
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
{
	struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	rvt_stop_rnr_timer(qp);
	trace_rvt_rnrnak_timeout(qp, 0);
	rdi->driver_f.schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return HRTIMER_NORESTART;
}
EXPORT_SYMBOL(rvt_rc_rnr_retry);

/**
 * rvt_qp_iter_init - initialize a QP iterator
 * @rdi: rvt devinfo
 * @v: u64 value
 * @cb: user-defined callback
 *
 * This returns an iterator suitable for iterating QPs
 * in the system.
 *
 * The @cb is a user defined callback and @v is a 64
 * bit value passed to and relevant for processing in the
 * @cb.  An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * Use cases that require memory allocation to succeed
 * must preallocate appropriately.
 *
 * Return: a pointer to an rvt_qp_iter or NULL
 */
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v))
{
	struct rvt_qp_iter *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return NULL;

	i->rdi = rdi;
	/* number of special QPs (SMI/GSI) for device */
	i->specials = rdi->ibdev.phys_port_cnt * 2;
	i->v = v;
	i->cb = cb;

	return i;
}
EXPORT_SYMBOL(rvt_qp_iter_init);

/**
 * rvt_qp_iter_next - return the next QP in iter
 * @iter: the iterator
 *
 * Fine grained QP iterator suitable for use
 * with debugfs seq_file mechanisms.
 *
 * Updates iter->qp with the current QP when the return
 * value is 0.
 *
 * Return: 0 - iter->qp is valid, 1 - no more QPs
 */
int rvt_qp_iter_next(struct rvt_qp_iter *iter)
	__must_hold(RCU)
{
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;
	struct rvt_dev_info *rdi = iter->rdi;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 */
	for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct rvt_ibport *rvp;
				int pidx;

				pidx = n % rdi->ibdev.phys_port_cnt;
				rvp = rdi->ports[pidx];
				qp = rcu_dereference(rvp->qp[n & 1]);
			} else {
				qp = rcu_dereference(
					rdi->qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
EXPORT_SYMBOL(rvt_qp_iter_next);

/**
 * rvt_qp_iter - iterate all QPs
 * @rdi: rvt devinfo
 * @v: a 64 bit value
 * @cb: a callback
 *
 * This provides a way for iterating all QPs.
 *
 * The @cb is a user defined callback and @v is a 64
 * bit value passed to and relevant for processing in the
 * @cb.  An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * The code has an internal iterator to simplify
 * non seq_file use cases.
 */
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v))
{
	int ret;
	struct rvt_qp_iter i = {
		.rdi = rdi,
		.specials = rdi->ibdev.phys_port_cnt * 2,
		.v = v,
		.cb = cb
	};

	rcu_read_lock();
	do {
		ret = rvt_qp_iter_next(&i);
		if (!ret) {
			rvt_get_qp(i.qp);
			rcu_read_unlock();
			i.cb(i.qp, i.v);
			rcu_read_lock();
			rvt_put_qp(i.qp);
		}
	} while (!ret);
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_qp_iter);
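
/*
 * Illustrative sketch (not part of rdmavt): counting RC QPs with
 * rvt_qp_iter().  The example_* names are hypothetical; the u64 @v
 * cookie carries a pointer to the counter into the callback.
 */
static void example_count_rc(struct rvt_qp *qp, u64 v)
{
	unsigned int *count = (unsigned int *)(uintptr_t)v;

	if (qp->ibqp.qp_type == IB_QPT_RC)
		(*count)++;
}

static unsigned int example_rc_qp_count(struct rvt_dev_info *rdi)
{
	unsigned int count = 0;

	rvt_qp_iter(rdi, (u64)(uintptr_t)&count, example_count_rc);
	return count;
}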

/*
 * This should be called with s_lock held.
 */
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	struct rvt_dev_info *rdi;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;
	rdi = ib_to_rvt(qp->ibqp.device);

	old_last = qp->s_last;
	trace_rvt_qp_send_completion(qp, wqe, old_last);
	last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
				    status);
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}
EXPORT_SYMBOL(rvt_send_complete);

/**
 * rvt_copy_sge - copy data to SGE memory
 * @qp: associated QP
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: boolean to release MR
 * @copy_last: do a separate copy of the last 8 bytes
 */
void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last)
{
	struct rvt_sge *sge = &ss->sge;
	int i;
	bool in_last = false;
	bool cacheless_copy = false;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	struct rvt_wss *wss = rdi->wss;
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;

	if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
		cacheless_copy = length >= PAGE_SIZE;
	} else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
		if (length >= PAGE_SIZE) {
			/*
			 * NOTE: this *assumes*:
			 * o The first vaddr is the dest.
			 * o If multiple pages, then vaddr is sequential.
			 */
			wss_insert(wss, sge->vaddr);
			if (length >= (2 * PAGE_SIZE))
				wss_insert(wss, (sge->vaddr + PAGE_SIZE));

			cacheless_copy = wss_exceeds_threshold(wss);
		} else {
			wss_advance_clean_counter(wss);
		}
	}

	if (copy_last) {
		if (length > 8) {
			length -= 8;
		} else {
			copy_last = false;
			in_last = true;
		}
	}

again:
	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		WARN_ON_ONCE(len == 0);
		if (unlikely(in_last)) {
			/* enforce byte transfer ordering */
			for (i = 0; i < len; i++)
				((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
		} else if (cacheless_copy) {
			cacheless_memcpy(sge->vaddr, data, len);
		} else {
			memcpy(sge->vaddr, data, len);
		}
		rvt_update_sge(ss, len, release);
		data += len;
		length -= len;
	}

	if (copy_last) {
		copy_last = false;
		in_last = true;
		length = 8;
		goto again;
	}
}
EXPORT_SYMBOL(rvt_copy_sge);
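
/*
 * Worked example for @copy_last (illustrative): for a 4104-byte RDMA
 * WRITE into a user QP, rvt_copy_sge() copies bytes 0..4095 first,
 * then re-enters the loop with in_last set and copies the final 8
 * bytes one at a time, so user code polling the tail of the buffer
 * cannot observe the last quadword before the rest of the payload.
 */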

static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
					  struct rvt_qp *sqp)
{
	rvp->n_pkt_drops++;
	/*
	 * For RC, the requester would timeout and retry so
	 * shortcut the timeouts and just signal too many retries.
	 */
	return sqp->ibqp.qp_type == IB_QPT_RC ?
		IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
}

/**
 * rvt_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from rvt_do_send() to forward a WQE addressed to the same
 * HFI.  Note that although we are single threaded due to the send engine, we
 * still have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
void rvt_ruc_loopback(struct rvt_qp *sqp)
{
	struct rvt_ibport *rvp = NULL;
	struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	bool release;
	int ret;
	bool copy_last = false;
	int local_ops = 0;

	rcu_read_lock();
	rvp = rdi->ports[sqp->port_num - 1];

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */

	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	if (sqp->s_last == READ_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp) {
		send_status = loopback_qp_drop(rvp, sqp);
		goto serr_no_r_lock;
	}
	spin_lock_irqsave(&qp->r_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		send_status = loopback_qp_drop(rvp, sqp);
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = true;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_REG_MR:
		goto send_comp;

	case IB_WR_LOCAL_INV:
		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
			if (rvt_invalidate_rkey(sqp,
						wqe->wr.ex.invalidate_rkey))
				send_status = IB_WC_LOC_PROT_ERR;
			local_ops = 1;
		}
		goto send_comp;

	case IB_WR_SEND_WITH_INV:
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND:
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		if (wqe->length > qp->r_len)
			goto inv_err;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND_WITH_INV:
			if (!rvt_invalidate_rkey(qp,
						 wqe->wr.ex.invalidate_rkey)) {
				wc.wc_flags = IB_WC_WITH_INVALIDATE;
				wc.ex.invalidate_rkey =
					wqe->wr.ex.invalidate_rkey;
			}
			break;
		case IB_WR_SEND_WITH_IMM:
			wc.wc_flags = IB_WC_WITH_IMM;
			wc.ex.imm_data = wqe->wr.ex.imm_data;
			break;
		default:
			break;
		}
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = rvt_is_user_qp(qp);
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = false;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = rvt_get_sge_length(sge, sqp->s_len);

		WARN_ON_ONCE(len == 0);
		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
			     len, release, copy_last);
		rvt_update_sge(&sqp->s_sge, len, !release);
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_unlock_irqrestore(&qp->r_lock, flags);
	spin_lock_irqsave(&sqp->s_lock, flags);
	rvp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	rvt_send_complete(sqp, wqe, send_status);
	if (local_ops) {
		atomic_dec(&sqp->local_ops_pending);
		local_ops = 0;
	}
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	rvp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_unlock_irqrestore(&qp->r_lock, flags);
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
				IB_AETH_CREDIT_SHIFT);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status =
		sqp->ibqp.qp_type == IB_QPT_RC ?
			IB_WC_REM_INV_REQ_ERR :
			IB_WC_SUCCESS;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	rvt_rc_error(qp, wc.status);

serr:
	spin_unlock_irqrestore(&qp->r_lock, flags);
serr_no_r_lock:
	spin_lock_irqsave(&sqp->s_lock, flags);
	rvt_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_ruc_loopback);