/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

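/*
 * Build a vector of kvecs (vec->sge[]) covering the reply in
 * rqstp->rq_res.  sge[0] is reserved for the RPC/RDMA header; the
 * xdr_buf's head, page list, and tail fill the remaining entries.
 * Returns 0 on success, or -EIO if the xdr_buf lengths are
 * inconsistent.
 */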
static int map_xdr(struct svcxprt_rdma *xprt,
		   struct xdr_buf *xdr,
		   struct svc_rdma_req_map *vec)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no;

	if (xdr->len !=
	    (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)) {
		pr_err("svcrdma: map_xdr: XDR buffer length error\n");
		return -EIO;
	}

	/* Skip the first sge; it is reserved for the RPCRDMA header */
	sge_no = 1;

	/* Head SGE */
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	sge_no++;

	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
	while (page_bytes) {
		vec->sge[sge_no].iov_base =
			page_address(xdr->pages[page_no]) + page_off;
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;
		vec->sge[sge_no].iov_len = sge_bytes;

		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}

	/* Tail SGE */
	if (xdr->tail[0].iov_len) {
		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
		vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
		sge_no++;
	}

	dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
		"page_base %u page_len %u head_len %zu tail_len %zu\n",
		sge_no, page_no, xdr->page_base, xdr->page_len,
		xdr->head[0].iov_len, xdr->tail[0].iov_len);

	vec->count = sge_no;
	return 0;
}

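/*
 * DMA map the portion of the xdr_buf at byte offset 'xdr_off'.  The
 * offset is translated into the page (in the head, page list, or
 * tail) and page offset that ib_dma_map_page() requires.  At most one
 * page (PAGE_SIZE bytes) is mapped per call.
 */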
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
			      struct xdr_buf *xdr,
			      u32 xdr_off, size_t len, int dir)
{
	struct page *page;
	dma_addr_t dma_addr;
	if (xdr_off < xdr->head[0].iov_len) {
		/* This offset is in the head */
		xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
		page = virt_to_page(xdr->head[0].iov_base);
	} else {
		xdr_off -= xdr->head[0].iov_len;
		if (xdr_off < xdr->page_len) {
			/* This offset is in the page list */
			xdr_off += xdr->page_base;
			page = xdr->pages[xdr_off >> PAGE_SHIFT];
			xdr_off &= ~PAGE_MASK;
		} else {
			/* This offset is in the tail */
			xdr_off -= xdr->page_len;
			xdr_off += (unsigned long)
				xdr->tail[0].iov_base & ~PAGE_MASK;
			page = virt_to_page(xdr->tail[0].iov_base);
		}
	}
	dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
				   min_t(size_t, PAGE_SIZE, len), dir);
	return dma_addr;
}

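/*
 * Post a single RDMA_WRITE transferring up to 'write_len' bytes of
 * the response xdr_buf, starting at offset 'xdr_off', into the client
 * memory region described by the <rmr, to> pair.  Returns the number
 * of bytes posted, which may be less than 'write_len' if the device's
 * sc_max_sge limit is reached, or -EIO on error.
 */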
/* Assumptions:
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct svc_rdma_req_map *vec)
{
	struct ib_send_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;

	if (vec->count > RPCSVC_MAXPAGES) {
		pr_err("svcrdma: Too many pages (%lu)\n", vec->count);
		return -EIO;
	}

	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
		rmr, (unsigned long long)to, xdr_off,
		write_len, vec->sge, vec->count);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	sge = ctxt->sge;

	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
	     xdr_sge_no++) {
		if (vec->sge[xdr_sge_no].iov_len > bc)
			break;
		bc -= vec->sge[xdr_sge_no].iov_len;
	}

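	/* bc now holds the offset into the first SGE to send from;
	 * below it is reused as the count of bytes remaining to write.
	 */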
	sge_off = bc;
	bc = write_len;
	sge_no = 0;

	/* Copy the remaining SGE */
	while (bc != 0) {
		sge_bytes = min_t(size_t,
			  bc, vec->sge[xdr_sge_no].iov_len-sge_off);
		sge[sge_no].length = sge_bytes;
		sge[sge_no].addr =
			dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 sge[sge_no].addr))
			goto err;
		atomic_inc(&xprt->sc_dma_used);
		sge[sge_no].lkey = xprt->sc_dma_lkey;
		ctxt->count++;
		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		if (xdr_sge_no > vec->count) {
			pr_err("svcrdma: Too many sges (%d)\n", xdr_sge_no);
			goto err;
		}
		bc -= sge_bytes;
		if (sge_no == xprt->sc_max_sge)
			break;
	}

	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof write_wr);
	ctxt->wr_op = IB_WR_RDMA_WRITE;
	write_wr.wr_id = (unsigned long)ctxt;
	write_wr.sg_list = &sge[0];
	write_wr.num_sge = sge_no;
	write_wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.send_flags = IB_SEND_SIGNALED;
	write_wr.wr.rdma.rkey = rmr;
	write_wr.wr.rdma.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr))
		goto err;
	return write_len - bc;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	/* Fatal error, close transport */
	return -EIO;
}

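/*
 * Walk the write list provided by the client and RDMA_WRITE the
 * pagelist and tail of the response into each chunk, recording the
 * number of bytes actually written in the response's write list.
 * Returns the number of response bytes consumed, or -EIO on error.
 */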
static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
	int write_len;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_write_array(rdma_argp);
	if (!arg_ary)
		return 0;
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	/* Write chunks start at the pagelist */
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < ntohl(arg_ary->wc_nchunks);
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, ntohl(arg_ch->rs_length));

		/* Prepare the response chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						arg_ch->rs_handle,
						arg_ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 ntohl(arg_ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
}

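/*
 * Like send_write_chunks(), but for a reply list: the entire RPC
 * reply (rq_res.len bytes) is RDMA_WRITE'n into the client-provided
 * reply chunk, and the resulting reply array is encoded in the
 * response header.  Returns rq_res.len on success, or -EIO on error.
 */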
static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	int nchunks;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_reply_array(rdma_argp);
	if (!arg_ary)
		return 0;
	/* XXX: need to fix when reply lists occur with read-list and/or
	 * write-list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	/* xdr offset starts at RPC message */
	nchunks = ntohl(arg_ary->wc_nchunks);
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		u64 rs_offset;
		ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, ntohl(ch->rs_length));

		/* Prepare the reply chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						ch->rs_handle, ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 ntohl(ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0]; the 'type' parameter indicates the type to place in the
 * RPCRDMA header, and the 'byte_count' parameter indicates how much
 * of the XDR to include in this RDMA_SEND. NB: The offset of the
 * payload to send is zero within the XDR.
 */
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_op_ctxt *ctxt,
		      struct svc_rdma_req_map *vec,
		      int byte_count)
{
	struct ib_send_wr send_wr;
	u32 xdr_off;
	int sge_no;
	int sge_bytes;
	int page_no;
	int pages;
	int ret;

	/* Post a recv buffer to handle another request. */
	ret = svc_rdma_post_recv(rdma);
	if (ret) {
		printk(KERN_INFO
		       "svcrdma: could not post a receive buffer, err=%d. "
		       "Closing transport %p.\n", ret, rdma);
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_put_context(ctxt, 0);
		return -ENOTCONN;
	}

	/* Prepare the context */
	ctxt->pages[0] = page;
	ctxt->count = 1;

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].addr =
	    ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
			    ctxt->sge[0].length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
		goto err;
	atomic_inc(&rdma->sc_dma_used);

	ctxt->direction = DMA_TO_DEVICE;

	/* Map the payload indicated by 'byte_count' */
	xdr_off = 0;
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		ctxt->sge[sge_no].addr =
			dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(rdma->sc_cm_id->device,
					 ctxt->sge[sge_no].addr))
			goto err;
		atomic_inc(&rdma->sc_dma_used);
		ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
		ctxt->sge[sge_no].length = sge_bytes;
	}
	if (byte_count != 0) {
		pr_err("svcrdma: Could not map %d bytes\n", byte_count);
		goto err;
	}

439 /* Save all respages in the ctxt and remove them from the
440 * respages array. They are our pages until the I/O
441 * completes.
442 */
J. Bruce Fieldsafc59402012-12-10 18:01:37 -0500443 pages = rqstp->rq_next_page - rqstp->rq_respages;
444 for (page_no = 0; page_no < pages; page_no++) {
Tom Tuckerc06b5402007-12-12 16:13:25 -0600445 ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
446 ctxt->count++;
447 rqstp->rq_respages[page_no] = NULL;
Tom Tuckerafd566e2008-10-03 15:45:03 -0500448 /*
449 * If there are more pages than SGE, terminate SGE
450 * list so that svc_rdma_unmap_dma doesn't attempt to
451 * unmap garbage.
452 */
Tom Tucker34d16e42008-07-02 14:56:13 -0500453 if (page_no+1 >= sge_no)
454 ctxt->sge[page_no+1].length = 0;
Tom Tuckerc06b5402007-12-12 16:13:25 -0600455 }
Tom Tucker7e4359e2014-03-25 15:14:57 -0500456 rqstp->rq_next_page = rqstp->rq_respages + 1;
Steve Wise0bf48282014-05-28 15:12:01 -0500457
Chuck Lever3fe04ee2015-01-13 11:03:03 -0500458 if (sge_no > rdma->sc_max_sge) {
459 pr_err("svcrdma: Too many sges (%d)\n", sge_no);
460 goto err;
461 }
	memset(&send_wr, 0, sizeof send_wr);
	ctxt->wr_op = IB_WR_SEND;
	send_wr.wr_id = (unsigned long)ctxt;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = sge_no;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret)
		goto err;

	return 0;

 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -EIO;
}

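/*
 * The generic svc code expects a prep_reply_hdr transport method.
 * RPC/RDMA constructs its reply header in svc_rdma_sendto() itself,
 * so there is nothing to do here.
 */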
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

/*
 * Return the start of an xdr buffer.
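 *
 * This assumes that rq_arg.len still accounts for the RPC/RDMA header
 * that was stripped from head[0] during receive processing, so the
 * amount by which xdr->len exceeds head + pages + tail is the header
 * size; backing head[0].iov_base up by that difference recovers the
 * start of the received message.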
 */
static void *xdr_start(struct xdr_buf *xdr)
{
	return xdr->head[0].iov_base -
		(xdr->len -
		 xdr->page_len -
		 xdr->tail[0].iov_len -
		 xdr->head[0].iov_len);
}

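/*
 * The transport's sendto method: map the response xdr_buf, transfer
 * any write-chunk and reply-chunk data with RDMA_WRITE, then pass the
 * RPC/RDMA header (plus any inline payload) to the client in an
 * RDMA_SEND.
 */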
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *reply_ary;
	enum rpcrdma_proc reply_type;
	int ret;
	int inline_bytes;
	struct page *res_page;
	struct svc_rdma_op_ctxt *ctxt;
	struct svc_rdma_req_map *vec;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

	/* Get the RDMA request header. */
	rdma_argp = xdr_start(&rqstp->rq_arg);

	/* Build a req vec for the XDR */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	vec = svc_rdma_get_req_map();
	ret = map_xdr(rdma, &rqstp->rq_res, vec);
	if (ret)
		goto err0;
	inline_bytes = rqstp->rq_res.len;

	/* Create the RDMA response header */
	res_page = svc_rdma_get_page();
	rdma_resp = page_address(res_page);
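	/* If the client supplied a reply chunk, the RPC message itself
	 * travels via RDMA and the header is marked RDMA_NOMSG;
	 * otherwise the message is sent inline as RDMA_MSG.
	 */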
	reply_ary = svc_rdma_get_reply_array(rdma_argp);
	if (reply_ary)
		reply_type = RDMA_NOMSG;
	else
		reply_type = RDMA_MSG;
	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
					 rdma_resp, reply_type);

	/* Send any write-chunk data and build resp write-list */
	ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, vec);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
		       ret);
		goto err1;
	}
	inline_bytes -= ret;

	/* Send any reply-list data and update resp reply-list */
	ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, vec);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
		       ret);
		goto err1;
	}
	inline_bytes -= ret;

	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
			 inline_bytes);
	svc_rdma_put_req_map(vec);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;

 err1:
	put_page(res_page);
 err0:
	svc_rdma_put_req_map(vec);
	svc_rdma_put_context(ctxt, 0);
	return ret;
}