/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 */
#ifndef _RDMA_RW_H
#define _RDMA_RW_H

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/mr_pool.h>

struct rdma_rw_ctx {
	/* number of RDMA READ/WRITE WRs (not counting MR WRs) */
	u32 nr_ops;

	/* tag for the union below: */
	u8 type;

	union {
		/* for mapping a single SGE: */
		struct {
			struct ib_sge sge;
			struct ib_rdma_wr wr;
		} single;

		/* for mapping of multiple SGEs: */
		struct {
			struct ib_sge *sges;
			struct ib_rdma_wr *wrs;
		} map;

		/* for registering multiple WRs: */
		struct rdma_rw_reg_ctx {
			struct ib_sge sge;
			struct ib_rdma_wr wr;
			struct ib_reg_wr reg_wr;
			struct ib_send_wr inv_wr;
			struct ib_mr *mr;
		} *reg;
	};
};

int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
		struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir);
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct scatterlist *sg, u32 sg_cnt,
		enum dma_data_direction dir);

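/*
 * Illustrative usage sketch, not part of the upstream header.  Names such as
 * qp, port_num, sgl, sg_cnt, remote_addr, rkey and done_cqe are placeholders
 * for state the caller already owns.  A ULP pulling data from a peer buffer
 * into a local scatterlist might do roughly:
 *
 *	struct rdma_rw_ctx rw;
 *	int ret;
 *
 *	ret = rdma_rw_ctx_init(&rw, qp, port_num, sgl, sg_cnt, 0,
 *			       remote_addr, rkey, DMA_FROM_DEVICE);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = rdma_rw_ctx_post(&rw, qp, port_num, &done_cqe, NULL);
 *
 * and later, from the done_cqe completion handler:
 *
 *	rdma_rw_ctx_destroy(&rw, qp, port_num, sgl, sg_cnt, DMA_FROM_DEVICE);
 *
 * DMA_FROM_DEVICE results in an RDMA READ from the remote buffer;
 * DMA_TO_DEVICE would issue an RDMA WRITE of the local data instead.
 */
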
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey,
		enum dma_data_direction dir);
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		enum dma_data_direction dir);

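/*
 * Illustrative sketch, not part of the upstream header: the signature
 * variants mirror rdma_rw_ctx_init()/rdma_rw_ctx_destroy(), but additionally
 * take the protection-information scatterlist and caller-filled signature
 * attributes (placeholder names again):
 *
 *	ret = rdma_rw_ctx_signature_init(&rw, qp, port_num, sgl, sg_cnt,
 *					 prot_sgl, prot_sg_cnt, &sig_attrs,
 *					 remote_addr, rkey, DMA_TO_DEVICE);
 *	...
 *	rdma_rw_ctx_destroy_signature(&rw, qp, port_num, sgl, sg_cnt,
 *				      prot_sgl, prot_sg_cnt, DMA_TO_DEVICE);
 */
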
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
		struct ib_cqe *cqe, struct ib_send_wr *chain_wr);

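/*
 * Illustrative chaining sketch, not part of the upstream header (placeholder
 * names): a target that wants a response send posted in the same
 * ib_post_send() call as the RDMA operations can either pass it as @chain_wr
 * to rdma_rw_ctx_post(), or fetch the WR chain and post it itself:
 *
 *	struct ib_send_wr *first;
 *
 *	first = rdma_rw_ctx_wrs(&rw, qp, port_num, &write_cqe, &rsp_wr);
 *	ret = ib_post_send(qp, first, NULL);
 */
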
unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
		unsigned int maxpages);
void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr);
int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr);
void rdma_rw_cleanup_mrs(struct ib_qp *qp);
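
/*
 * Illustrative sizing sketch, not part of the upstream header (placeholder
 * names): a ULP creating its QP typically reserves room for READ/WRITE
 * contexts through max_rdma_ctxs rather than sizing send WRs by hand, e.g.:
 *
 *	factor = rdma_rw_mr_factor(dev, port_num, max_pages_per_io);
 *	qp_attr.cap.max_rdma_ctxs = queue_depth * factor;
 *
 * rdma_rw_init_qp() then factors max_rdma_ctxs into the send queue size,
 * while rdma_rw_init_mrs() and rdma_rw_cleanup_mrs() manage the MR pool;
 * all three are invoked from the core QP create/destroy paths rather than
 * directly by ULPs.
 */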

#endif /* _RDMA_RW_H */