/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 */
#ifndef _RDMA_RW_H
#define _RDMA_RW_H

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/mr_pool.h>

struct rdma_rw_ctx {
	/* number of RDMA READ/WRITE WRs (not counting MR WRs) */
	u32 nr_ops;

	/* tag for the union below: */
	u8 type;

	union {
		/* for mapping a single SGE: */
		struct {
			struct ib_sge sge;
			struct ib_rdma_wr wr;
		} single;

		/* for mapping of multiple SGEs: */
		struct {
			struct ib_sge *sges;
			struct ib_rdma_wr *wrs;
		} map;

		/* for registering multiple WRs: */
		struct rdma_rw_reg_ctx {
			struct ib_sge sge;
			struct ib_rdma_wr wr;
			struct ib_reg_wr reg_wr;
			struct ib_send_wr inv_wr;
			struct ib_mr *mr;
		} *reg;
	};
};
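
/*
 * The core rw code (drivers/infiniband/core/rw.c) picks one of the union
 * members above per context: roughly, 'reg' when the device needs memory
 * registration for the transfer (e.g. iWARP RDMA READs, or the signature
 * variants below), 'single' when the payload maps to a single SGE, and
 * 'map' otherwise.
 */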

int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
		struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir);
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct scatterlist *sg, u32 sg_cnt,
		enum dma_data_direction dir);
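
/*
 * Typical use (a minimal sketch; sgl, sg_cnt, remote_addr, rkey and
 * done_cqe are the caller's, and error handling is elided):
 *
 *	struct rdma_rw_ctx ctx;
 *	int ret;
 *
 *	ret = rdma_rw_ctx_init(&ctx, qp, port_num, sgl, sg_cnt, 0,
 *			remote_addr, rkey, DMA_FROM_DEVICE);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = rdma_rw_ctx_post(&ctx, qp, port_num, &done_cqe, NULL);
 *
 * and, once the completion for done_cqe has been reaped:
 *
 *	rdma_rw_ctx_destroy(&ctx, qp, port_num, sgl, sg_cnt, DMA_FROM_DEVICE);
 *
 * DMA_FROM_DEVICE issues RDMA READs from the remote buffer into the local
 * scatterlist; DMA_TO_DEVICE issues RDMA WRITEs to it.
 */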

int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey,
		enum dma_data_direction dir);
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		enum dma_data_direction dir);
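
/*
 * The _signature variants additionally map @prot_sg and set up a signature
 * (T10-PI style integrity) memory registration described by @sig_attrs.
 * A context set up with rdma_rw_ctx_signature_init() must be torn down
 * with rdma_rw_ctx_destroy_signature(), not rdma_rw_ctx_destroy().
 */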

struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
		struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
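
/*
 * rdma_rw_ctx_post() posts the WR chain that rdma_rw_ctx_wrs() builds.
 * If @chain_wr is NULL, @cqe must be set so the caller is notified when
 * the last READ/WRITE completes; if @chain_wr is given it is appended to
 * the chain, and completion is typically signalled on that chained WR
 * instead.  A common pattern in storage targets is to chain the response
 * send after the data transfer (sketch; rsp_wr is the caller's prepared
 * send WR):
 *
 *	struct ib_send_wr *first;
 *
 *	first = rdma_rw_ctx_wrs(&ctx, qp, port_num, NULL, &rsp_wr);
 *	ret = ib_post_send(qp, first, NULL);
 */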

unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
		unsigned int maxpages);
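
/*
 * rdma_rw_mr_factor() lets a ULP size ib_qp_init_attr.cap.max_rdma_ctxs
 * for the largest I/O it intends to issue, e.g. (hypothetical sizing;
 * queue_depth and max_io_pages are the caller's):
 *
 *	attr->cap.max_rdma_ctxs = queue_depth *
 *			rdma_rw_mr_factor(dev, port_num, max_io_pages);
 *
 * The three helpers below are invoked by the RDMA core when a QP with
 * max_rdma_ctxs set is created or destroyed; ULPs do not normally call
 * them directly.
 */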
void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr);
int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr);
void rdma_rw_cleanup_mrs(struct ib_qp *qp);

#endif /* _RDMA_RW_H */