/*
 * Copyright(c) 2015 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);

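/*
 * Illustrative sketch only (not part of this file's logic): a caller can
 * consult the table above to gate posting on the current QP state, e.g.
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 *
 * which is the same check rvt_post_send() performs further below.
 */
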
static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt device info structure
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our QPN table rather than keeping a second one. Mark the
	 * bitmaps for those QPNs here. The reserved range must be *after*
	 * the range which verbs will pick from.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map, GFP_KERNEL);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}

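/*
 * Illustrative sketch, not a requirement of this file: a driver wanting a
 * reserved, non-verbs QPN range (e.g. for PSM) might fill in dparms before
 * registering with rdmavt. The values below are made up for the example:
 *
 *	rdi->dparms.qpn_start = 0;
 *	rdi->dparms.qpn_inc = 1;
 *	rdi->dparms.qpn_res_start = 0x10000;
 *	rdi->dparms.qpn_res_end = 0x1ffff;
 *
 * init_qpn_table() then pre-sets the bitmap bits for 0x10000-0x1ffff so
 * the allocator below never hands those QPNs out.
 */
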
/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}

int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER) {
		rvt_pr_info(rdi, "Driver is doing QP init.\n");
		return 0;
	}

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc(sizeof(*rdi->qp_dev), GFP_KERNEL);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc(rdi->qp_dev->qp_table_size *
			sizeof(*rdi->qp_dev->qp_table),
			GFP_KERNEL);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}

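/*
 * Minimal sketch of the callbacks a driver must wire up before
 * rvt_driver_qp_init() will accept it when it is not doing its own QP
 * init. The names on the right are hypothetical driver functions, not
 * defined here:
 *
 *	rdi->driver_f.free_all_qps = drv_free_all_qps;
 *	rdi->driver_f.qp_priv_alloc = drv_qp_priv_alloc;
 *	rdi->driver_f.qp_priv_free = drv_qp_priv_free;
 *	rdi->driver_f.notify_qp_reset = drv_notify_qp_reset;
 */
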
/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;
	spinlock_t *ql; /* work around too long line below */

	if (rdi->driver_f.free_all_qps)
		qp_inuse = rdi->driver_f.free_all_qps(rdi);

	if (!rdi->qp_dev)
		return qp_inuse;

	ql = &rdi->qp_dev->qpt_lock;
	spin_lock_irqsave(ql, flags);
	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
					       lockdep_is_held(ql));
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
							  lockdep_is_held(ql)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(ql, flags);
	synchronize_rcu();
	return qp_inuse;
}

void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER)
		return; /* driver did the qp init so nothing else to do */

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

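/*
 * Worked example (assuming a 4K page, so RVT_BITS_PER_PAGE == 32768):
 * map index 1 with off 5 yields QPN 1 * 32768 + 5 = 32773. The allocator
 * below relies on this being the inverse of the page/offset split it
 * performs on a QPN.
 */
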
/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port,
					       GFP_KERNEL);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= RVT_QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK. It gets re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no bits at shift and below */
		WARN_ON(offset & (rdi->dparms.qos_shift - 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: rvt device info structure
 * @qp: the QP to reset
 * @type: the QP type
 *
 * The r_lock and s_lock are required to be held by the caller.
 */
void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		  enum ib_qp_type type)
{
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);
		del_timer_sync(&qp->s_timer);

		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out of the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
	}

	/*
	 * Let the driver do any tear down it needs to for a qp
	 * that has been reset.
	 */
	rdi->driver_f.notify_qp_reset(qp);

	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}
EXPORT_SYMBOL(rvt_reset_qp);

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance, there is a
 * reserved range for PSM.
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	gfp_t gfp;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable to RC QP's only */

	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
		GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		if (gfp == GFP_NOIO)
			swq = __vmalloc(
				(init_attr->cap.max_send_wr + 1) * sz,
				gfp, PAGE_KERNEL);
		else
			swq = vmalloc(
				(init_attr->cap.max_send_wr + 1) * sz);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, gfp);
		if (!qp)
			goto bail_swq;

		RCU_INIT_POINTER(qp->next, NULL);

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
		if (!priv)
			goto bail_qp;
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (udata)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else if (gfp == GFP_NOIO)
				qp->r_rq.wq = __vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						gfp, PAGE_KERNEL);
			else
				qp->r_rq.wq = vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			if (!qp->r_rq.wq)
				goto bail_driver_priv;
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		rvt_reset_qp(rdi, qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and it's good; now keep track of what types of
	 * opcodes can be processed on this QP. We do this by keeping track of
	 * what the 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	return ret;

bail_ip:
	kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	vfree(qp->r_rq.wq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}

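/*
 * Illustrative sketch only: a consumer reaches rvt_create_qp() through the
 * standard verbs entry point, roughly as follows (the values are made up):
 *
 *	struct ib_qp_init_attr attr = {
 *		.qp_type = IB_QPT_RC,
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.send_cq = cq, .recv_cq = cq,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &attr);
 *
 * ib_create_qp() fills in the generic qp->ibqp fields while rvt_create_qp()
 * assigns the QPN from the table above.
 */
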
void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&ibah_to_rvtah(
						wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
EXPORT_SYMBOL(rvt_clear_mr_refs);

/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct ib_wc wc;
	int ret = 0;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				     rcu_dereference_protected(qp->next,
				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
EXPORT_SYMBOL(rvt_remove_qp);

/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	enum rdma_link_layer link;

	link = rdma_port_get_link_layer(ibqp->device, qp->port_num);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, link))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values. It is OK to set it greater
	 * than the active mtu (or even the max_cap, if we have tuned that
	 * to a small mtu). We'll set qp->path_mtu to the lesser of the
	 * requested attribute mtu and the active mtu, for packetizing
	 * messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;

			/*
			 * Ignored by drivers which do not support it. Not
			 * really worth creating a call back into the driver
			 * just to set a flag.
			 */
			qp->s_flags |= RVT_S_AHG_CLEAR;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}

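/*
 * Illustrative sketch only: a typical consumer drives the state machine
 * above through ib_modify_qp(), e.g. for the RESET -> INIT transition
 * (values are made up):
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state = IB_QPS_INIT,
 *		.pkey_index = 0,
 *		.port_num = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *
 * This RESET -> INIT transition is when rvt_insert_qp() above hashes the
 * QP into the lookup table.
 */
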
/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
	/*
	 * VT-DRIVER-API: qp_flush()
	 * Driver provides a mechanism to flush and wait for that flush to
	 * finish.
	 */

	return -EOPNOTSUPP;
}

int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	return -EOPNOTSUPP;
}

/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		  struct ib_recv_wr **bad_wr)
{
	/*
	 * When a packet arrives the driver needs to call up to rvt to process
	 * the packet. The UD, RC, UC processing will be done in rvt, however
	 * the driver should be able to override this if it so chooses.
	 * Perhaps a set of function pointers set up at registration time.
	 */

	return -EOPNOTSUPP;
}

/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
			return -EINVAL;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			return -EINVAL;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
	} else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
		return -EINVAL;
	}

	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last)
		return -ENOMEM;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC)
		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		 wr->opcode == IB_WR_RDMA_WRITE ||
		 wr->opcode == IB_WR_RDMA_READ)
		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
	else
		memcpy(&wqe->wr, wr, sizeof(wqe->wr));

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok)
				goto bail_inval_free;
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}
	if (qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_RC) {
		if (wqe->length > 0x80000000U)
			goto bail_inval_free;
	} else {
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
	}
	wqe->ssn = qp->s_ssn++;
	qp->s_head = next;

	return 0;

bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return -EINVAL;
}

/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		  struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	unsigned long flags = 0;
	int call_send;
	unsigned nreq = 0;
	int err = 0;

	spin_lock_irqsave(&qp->s_lock, flags);

	/*
	 * Ensure QP state is such that we can send. If not, bail out early;
	 * there is no need to do this every time we post a send.
	 */
	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return -EINVAL;
	}

	/*
	 * If the send queue is empty and we only have a single WR, then just
	 * go ahead and kick the send engine into gear. Otherwise we will
	 * always just schedule the send to happen later.
	 */
	call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			goto bail;
		}
		nreq++;
	}
bail:
	if (nreq && !call_send)
		rdi->driver_f.schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	if (nreq && call_send)
		rdi->driver_f.do_send(qp);
	return err;
}

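/*
 * Illustrative sketch only: a consumer reaches rvt_post_send() through the
 * core verbs call, roughly as follows (the local names and values are made
 * up for the example):
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len,
 *			      .lkey = mr->lkey };
 *	struct ib_send_wr wr = { .opcode = IB_WR_SEND, .num_sge = 1,
 *				 .sg_list = &sge,
 *				 .send_flags = IB_SEND_SIGNALED };
 *	struct ib_send_wr *bad_wr;
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 *
 * Each WR in the chain is validated and queued by rvt_post_one_wr() above.
 */
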
/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	return -EOPNOTSUPP;
}

void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
EXPORT_SYMBOL(rvt_free_qpn);

void rvt_dec_qp_cnt(struct rvt_dev_info *rdi)
{
	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	spin_unlock(&rdi->n_qps_lock);
}
EXPORT_SYMBOL(rvt_dec_qp_cnt);