/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RXE_VERBS_H
#define RXE_VERBS_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <rdma/rdma_user_rxe.h>
#include "rxe_pool.h"
#include "rxe_task.h"
#include "rxe_hw_counters.h"

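/* P_Key comparison per the IB spec: the low 15 bits carry the key value
 * and the top bit marks full (1) vs. limited (0) membership.  Two keys
 * match when their key values are equal and non-zero and at least one
 * of them is a full-membership key.
 */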
static inline int pkey_match(u16 key1, u16 key2)
{
	return (((key1 & 0x7fff) != 0) &&
		((key1 & 0x7fff) == (key2 & 0x7fff)) &&
		((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
}

/* Return >0 if psn_a > psn_b
 *	   0 if psn_a == psn_b
 *	  <0 if psn_a < psn_b
 *
 * PSNs are 24 bits wide, so the difference is shifted into the top
 * 24 bits of an s32; the sign of the result then stays correct across
 * PSN wrap-around.
 */
static inline int psn_compare(u32 psn_a, u32 psn_b)
{
	s32 diff;

	diff = (psn_a - psn_b) << 8;
	return diff;
}

struct rxe_ucontext {
	struct ib_ucontext	ibuc;
	struct rxe_pool_entry	pelem;
};

struct rxe_pd {
	struct ib_pd		ibpd;
	struct rxe_pool_entry	pelem;
};

struct rxe_ah {
	struct ib_ah		ibah;
	struct rxe_pool_entry	pelem;
	struct rxe_pd		*pd;
	struct rxe_av		av;
};

struct rxe_cqe {
	union {
		struct ib_wc		ibwc;
		struct ib_uverbs_wc	uibwc;
	};
};

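/* Completion queue.  Completions are written into a rxe_queue ring
 * (shared with userspace for user CQs); comp_task is the tasklet that
 * delivers completion notifications to the consumer.
 */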
struct rxe_cq {
	struct ib_cq		ibcq;
	struct rxe_pool_entry	pelem;
	struct rxe_queue	*queue;
	spinlock_t		cq_lock;
	u8			notify;
	bool			is_dying;
	int			is_user;
	struct tasklet_struct	comp_task;
};

enum wqe_state {
	wqe_state_posted,
	wqe_state_processing,
	wqe_state_pending,
	wqe_state_done,
	wqe_state_error,
};

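/* Send and receive work queues: queue attributes plus the rxe_queue
 * ring that holds the posted WQEs.  The receive queue keeps separate
 * producer and consumer locks because WQEs are posted and consumed
 * from different contexts.
 */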
struct rxe_sq {
	int			max_wr;
	int			max_sge;
	int			max_inline;
	spinlock_t		sq_lock; /* guard queue */
	struct rxe_queue	*queue;
};

struct rxe_rq {
	int			max_wr;
	int			max_sge;
	spinlock_t		producer_lock; /* guard queue producer */
	spinlock_t		consumer_lock; /* guard queue consumer */
	struct rxe_queue	*queue;
};

struct rxe_srq {
	struct ib_srq		ibsrq;
	struct rxe_pool_entry	pelem;
	struct rxe_pd		*pd;
	struct rxe_rq		rq;
	u32			srq_num;

	int			limit;
	int			error;
};

enum rxe_qp_state {
	QP_STATE_RESET,
	QP_STATE_INIT,
	QP_STATE_READY,
	QP_STATE_DRAIN,		/* req only */
	QP_STATE_DRAINED,	/* req only */
	QP_STATE_ERROR
};

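/* Per-QP state for the three software "engines" that drive a QP: the
 * requester (rxe_req_info), the completer (rxe_comp_info) and the
 * responder (rxe_resp_info below).  Each owns a rxe_task so its work
 * can be scheduled and run from tasklet context.
 */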
struct rxe_req_info {
	enum rxe_qp_state	state;
	int			wqe_index;
	u32			psn;
	int			opcode;
	atomic_t		rd_atomic;
	int			wait_fence;
	int			need_rd_atomic;
	int			wait_psn;
	int			need_retry;
	int			noack_pkts;
	struct rxe_task		task;
};

struct rxe_comp_info {
	u32			psn;
	int			opcode;
	int			timeout;
	int			timeout_retry;
	int			started_retry;
	u32			retry_cnt;
	u32			rnr_retry;
	struct rxe_task		task;
};

enum rdatm_res_state {
	rdatm_res_state_next,
	rdatm_res_state_new,
	rdatm_res_state_replay,
};

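/* A single responder resource: the saved context needed to (re)send
 * the response to one inbound RDMA read or atomic request, so that a
 * duplicate request can be replayed without being re-executed.
 */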
struct resp_res {
	int			type;
	int			replay;
	u32			first_psn;
	u32			last_psn;
	u32			cur_psn;
	enum rdatm_res_state	state;

	union {
		struct {
			struct sk_buff	*skb;
		} atomic;
		struct {
			struct rxe_mem	*mr;
			u64		va_org;
			u32		rkey;
			u32		length;
			u64		va;
			u32		resid;
		} read;
	};
};

struct rxe_resp_info {
	enum rxe_qp_state	state;
	u32			msn;
	u32			psn;
	u32			ack_psn;
	int			opcode;
	int			drop_msg;
	int			goto_error;
	int			sent_psn_nak;
	enum ib_wc_status	status;
	u8			aeth_syndrome;

	/* Receive only */
	struct rxe_recv_wqe	*wqe;

	/* RDMA read / atomic only */
	u64			va;
	struct rxe_mem		*mr;
	u32			resid;
	u32			rkey;
	u32			length;
	u64			atomic_orig;

	/* SRQ only */
	struct {
		struct rxe_recv_wqe	wqe;
		struct ib_sge		sge[RXE_MAX_SGE];
	} srq_wqe;

	/* Responder resources. It's a circular list where the oldest
	 * resource is dropped first.
	 */
	struct resp_res		*resources;
	unsigned int		res_head;
	unsigned int		res_tail;
	struct resp_res		*res;
	struct rxe_task		task;
};

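/* Software queue pair.  Packets arriving from the network are queued
 * on req_pkts/resp_pkts and consumed by the responder and completer
 * tasks respectively; the requester task builds outgoing packets and
 * accounts for them via skb_out.
 */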
struct rxe_qp {
	struct rxe_pool_entry	pelem;
	struct ib_qp		ibqp;
	struct ib_qp_attr	attr;
	unsigned int		valid;
	unsigned int		mtu;
	int			is_user;

	struct rxe_pd		*pd;
	struct rxe_srq		*srq;
	struct rxe_cq		*scq;
	struct rxe_cq		*rcq;

	enum ib_sig_type	sq_sig_type;

	struct rxe_sq		sq;
	struct rxe_rq		rq;

	struct socket		*sk;
	u32			dst_cookie;
	u16			src_port;

	struct rxe_av		pri_av;
	struct rxe_av		alt_av;

	/* list of mcast groups qp has joined (for cleanup) */
	struct list_head	grp_list;
	spinlock_t		grp_lock; /* guard grp_list */

	struct sk_buff_head	req_pkts;
	struct sk_buff_head	resp_pkts;
	struct sk_buff_head	send_pkts;

	struct rxe_req_info	req;
	struct rxe_comp_info	comp;
	struct rxe_resp_info	resp;

	atomic_t		ssn;
	atomic_t		skb_out;
	int			need_req_skb;

	/* Timer for retransmitting packets when ACKs have been lost. RC
	 * only. The requester sets it when it is not already
	 * started. The responder resets it whenever an ack is
	 * received.
	 */
	struct timer_list	retrans_timer;
	u64			qp_timeout_jiffies;

	/* Timer for handling RNR NAKs. */
	struct timer_list	rnr_nak_timer;

	spinlock_t		state_lock; /* guard requester and completer */

	struct execute_work	cleanup_work;
};

enum rxe_mem_state {
	RXE_MEM_STATE_ZOMBIE,
	RXE_MEM_STATE_INVALID,
	RXE_MEM_STATE_FREE,
	RXE_MEM_STATE_VALID,
};

enum rxe_mem_type {
	RXE_MEM_TYPE_NONE,
	RXE_MEM_TYPE_DMA,
	RXE_MEM_TYPE_MR,
	RXE_MEM_TYPE_FMR,
	RXE_MEM_TYPE_MW,
};

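/* Memory region layout: a region's physical buffers are described by
 * rxe_phys_buf entries, grouped into rxe_map pages of RXE_BUF_PER_MAP
 * entries each.  The page_*/map_* shift and mask pairs support
 * translating an IOVA into the map and buffer that back it.
 */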
#define RXE_BUF_PER_MAP		(PAGE_SIZE / sizeof(struct rxe_phys_buf))

struct rxe_phys_buf {
	u64	addr;
	u64	size;
};

struct rxe_map {
	struct rxe_phys_buf	buf[RXE_BUF_PER_MAP];
};

struct rxe_mem {
	struct rxe_pool_entry	pelem;
	union {
		struct ib_mr		ibmr;
		struct ib_mw		ibmw;
	};

	struct rxe_pd		*pd;
	struct ib_umem		*umem;

	u32			lkey;
	u32			rkey;

	enum rxe_mem_state	state;
	enum rxe_mem_type	type;
	u64			va;
	u64			iova;
	size_t			length;
	u32			offset;
	int			access;

	int			page_shift;
	int			page_mask;
	int			map_shift;
	int			map_mask;

	u32			num_buf;
	u32			nbuf;

	u32			max_buf;
	u32			num_map;

	struct rxe_map		**map;
};

struct rxe_mc_grp {
	struct rxe_pool_entry	pelem;
	spinlock_t		mcg_lock; /* guard group */
	struct rxe_dev		*rxe;
	struct list_head	qp_list;
	union ib_gid		mgid;
	int			num_qp;
	u32			qkey;
	u16			pkey;
};

struct rxe_mc_elem {
	struct rxe_pool_entry	pelem;
	struct list_head	qp_list;
	struct list_head	grp_list;
	struct rxe_qp		*qp;
	struct rxe_mc_grp	*grp;
};

struct rxe_port {
	struct ib_port_attr	attr;
	u16			*pkey_tbl;
	__be64			port_guid;
	__be64			subnet_prefix;
	spinlock_t		port_lock; /* guard port */
	unsigned int		mtu_cap;
	/* special QPs */
	u32			qp_smi_index;
	u32			qp_gsi_index;
};

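/* Per-device state: the ib_device registered with the RDMA core, the
 * underlying net_device used for transmit and receive, and one
 * rxe_pool per object type that handles allocation, reference counting
 * and index/key lookup of the objects declared above.
 */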
struct rxe_dev {
	struct ib_device	ib_dev;
	struct ib_device_attr	attr;
	struct device_dma_parameters dma_parms;
	int			max_ucontext;
	int			max_inline_data;
	struct mutex		usdev_lock;

	struct net_device	*ndev;

	int			xmit_errors;

	struct rxe_pool		uc_pool;
	struct rxe_pool		pd_pool;
	struct rxe_pool		ah_pool;
	struct rxe_pool		srq_pool;
	struct rxe_pool		qp_pool;
	struct rxe_pool		cq_pool;
	struct rxe_pool		mr_pool;
	struct rxe_pool		mw_pool;
	struct rxe_pool		mc_grp_pool;
	struct rxe_pool		mc_elem_pool;

	spinlock_t		pending_lock; /* guard pending_mmaps */
	struct list_head	pending_mmaps;

	spinlock_t		mmap_offset_lock; /* guard mmap_offset */
	u64			mmap_offset;

	atomic64_t		stats_counters[RXE_NUM_OF_COUNTERS];

	struct rxe_port		port;
	struct crypto_shash	*tfm;
};

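/* Software statistics: one atomic64 per counter defined in
 * rxe_hw_counters.h, incremented lock-free on the hot path and exposed
 * through the device's hw_counters interface.
 */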
static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
	atomic64_inc(&rxe->stats_counters[index]);
}

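/* Converters from the core ib_* objects embedded above back to the
 * containing rxe objects, e.g. in a verbs callback:
 *
 *	struct rxe_qp *qp = to_rqp(ibqp);
 *
 * Each returns NULL when passed a NULL pointer.
 */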
static inline struct rxe_dev *to_rdev(struct ib_device *dev)
{
	return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
}

static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
{
	return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
}

static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
{
	return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
}

static inline struct rxe_ah *to_rah(struct ib_ah *ah)
{
	return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
}

static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
{
	return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
}

static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
{
	return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
}

static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
{
	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}

static inline struct rxe_mem *to_rmr(struct ib_mr *mr)
{
	return mr ? container_of(mr, struct rxe_mem, ibmr) : NULL;
}

static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
{
	return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
}

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);

#endif /* RXE_VERBS_H */