// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include <linux/highmem.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	/* Minimal Read chunk size */
	size += sizeof(__be32);	/* segment count */
	size += rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max call header size = %u\n",
		__func__, size);
	return size;
}

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	size = sizeof(__be32);	/* segment count */
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max reply header size = %u\n",
		__func__, size);
	return size;
}

/**
 * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
 * @r_xprt: transport instance to initialize
 *
 * The max_inline fields contain the maximum size of an RPC message
 * so the marshaling code doesn't have to repeat this calculation
 * for every RPC.
 */
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	unsigned int maxsegs = r_xprt->rx_ia.ri_max_segs;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;

	ep->rep_max_inline_send =
		ep->rep_inline_send - rpcrdma_max_call_header_size(maxsegs);
	ep->rep_max_inline_recv =
		ep->rep_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
}

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ep.rep_max_inline_send)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep.rep_max_inline_recv;
}

/* The client is required to provide a Reply chunk if the maximum
 * size of the non-payload part of the RPC Reply is larger than
 * the inline threshold.
 */
static bool
rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
			  const struct rpc_rqst *rqst)
{
	const struct xdr_buf *buf = &rqst->rq_rcv_buf;

	return (buf->head[0].iov_len + buf->tail[0].iov_len) <
		r_xprt->rx_ep.rep_max_inline_recv;
}

/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_page = NULL;
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}

/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge.
		 */
		if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
			if (!*ppages)
				*ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (!*ppages)
				return -ENOBUFS;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}

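/* Emit a single XDR discriminator word: xdr_one marks a chunk list
 * item as present, xdr_zero marks an empty or terminated list. Both
 * helpers return -EMSGSIZE if no header buffer space remains.
 */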
static inline int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static inline int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}

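/* Marshal one RDMA segment (handle, length, 64-bit offset) into a
 * previously reserved area of the transport header.
 */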
static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
	*iptr++ = cpu_to_be32(mr->mr_handle);
	*iptr++ = cpu_to_be32(mr->mr_length);
	xdr_encode_hyper(iptr, mr->mr_offset);
}

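/* Reserve four XDR words in the header stream and emit a plain
 * HLOO segment describing @mr.
 */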
static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr);
	return 0;
}

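/* Reserve six XDR words and emit one Read list item: the "item
 * present" discriminator, the read chunk Position, then the HLOO
 * segment describing @mr.
 */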
static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;			/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mr);
	return 0;
}

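/* Grab an MR for the next chunk: reuse one from the request's free
 * list if possible, otherwise take one from the transport's MR pool.
 * The MR is recorded on rl_registered and handed to frwr_map() to
 * register up to @nsegs segments. If no MR is available, the RPC
 * waits for buffer space and the MR refresh worker is kicked.
 */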
static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
						 struct rpcrdma_req *req,
						 struct rpcrdma_mr_seg *seg,
						 int nsegs, bool writing,
						 struct rpcrdma_mr **mr)
{
	*mr = rpcrdma_mr_pop(&req->rl_free_mrs);
	if (!*mr) {
		*mr = rpcrdma_mr_get(r_xprt);
		if (!*mr)
			goto out_getmr_err;
		trace_xprtrdma_mr_get(req);
		(*mr)->mr_req = req;
	}

	rpcrdma_mr_push(*mr, &req->rl_registered);
	return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);

out_getmr_err:
	trace_xprtrdma_nomrs(req);
	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
	if (r_xprt->rx_ep.rep_connected != -ENODEV)
		schedule_work(&r_xprt->rx_buf.rb_refresh_worker);
	return ERR_PTR(-EAGAIN);
}

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req,
				    struct rpc_rqst *rqst,
				    enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	if (rtype == rpcrdma_noch)
		goto done;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

done:
	return encode_item_not_present(xdr);
}

/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req,
				     struct rpc_rqst *rqst,
				     enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_writech)
		goto done;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

done:
	return encode_item_not_present(xdr);
}

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
				      struct rpcrdma_req *req,
				      struct rpc_rqst *rqst,
				      enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_replych)
		return encode_item_not_present(xdr);

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

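/* Release callback for rl_kref: when the final reference is dropped,
 * the RPC Reply that was waiting for the Send WR's SGEs to be
 * unmapped can now be completed.
 */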
static void rpcrdma_sendctx_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);
	struct rpcrdma_rep *rep = req->rl_reply;

	rpcrdma_complete_rqst(rep);
	rep->rr_rxprt->rx_stats.reply_waits_for_send++;
}

/**
 * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
 * @sc: sendctx containing SGEs to unmap
 *
 */
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
{
	struct ib_sge *sge;

	if (!sc->sc_unmap_count)
		return;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
	     ++sge, --sc->sc_unmap_count)
		ib_dma_unmap_page(sc->sc_device, sge->addr, sge->length,
				  DMA_TO_DEVICE);

	kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done);
}

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req, u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = sc->sc_sges;

	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
	sc->sc_wr.num_sge++;
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}

/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */
static bool rpcrdma_prepare_msg_sges(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req,
				     struct xdr_buf *xdr,
				     enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_sge *sge = sc->sc_sges;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_regbuf;
	sc->sc_device = rdmab_device(rb);
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}

	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr =
				ib_dma_map_page(rdmab_device(rb), *ppages,
						page_base, len, DMA_TO_DEVICE);
			if (ib_dma_mapping_error(rdmab_device(rb),
						 sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = rdmab_lkey(rb);

			sc->sc_unmap_count++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr =
			ib_dma_map_page(rdmab_device(rb), page, page_base, len,
					DMA_TO_DEVICE);
		if (ib_dma_mapping_error(rdmab_device(rb), sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = rdmab_lkey(rb);
		sc->sc_unmap_count++;
	}

out:
	sc->sc_wr.num_sge += sge_no;
	if (sc->sc_unmap_count)
		kref_get(&req->rl_kref);
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;

out_mapping_overflow:
	rpcrdma_sendctx_unmap(sc);
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	rpcrdma_sendctx_unmap(sc);
	trace_xprtrdma_dma_maperr(sge[sge_no].addr);
	return false;
}

/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req, u32 hdrlen,
			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	int ret;

	ret = -EAGAIN;
	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
	if (!req->rl_sendctx)
		goto err;
	req->rl_sendctx->sc_wr.num_sge = 0;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	kref_init(&req->rl_kref);

	ret = -EIO;
	if (!rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen))
		goto err;
	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(r_xprt, req, xdr, rtype))
			goto err;
	return 0;

err:
	trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
	return ret;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if the caller should call again with the same arguments,
 *	%-ENOBUFS if the caller should call again after a delay,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
			rqst);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
		 rpcrdma_nonpayload_inline(r_xprt, rqst))
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* If this is a retransmit, discard previously registered
	 * chunks. Very likely the connection has been replaced,
	 * so these registrations are invalid and unusable.
	 */
	frwr_reset(req);

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
					&rqst->rq_snd_buf, rtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(req, rtype, wtype);
	return 0;

out_err:
	trace_xprtrdma_marshal_failed(rqst, ret);
	r_xprt->rx_stats.failed_marshal_count++;
	frwr_reset(req);
	return ret;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	trace_xprtrdma_fixup(rqst, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			trace_xprtrdma_fixup_pg(rqst, i, srcp,
						copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

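/* Decode one HLOO segment from a received chunk list. Only the
 * segment length is returned to the caller; the handle and offset
 * are consumed for tracing.
 */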
static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	handle = be32_to_cpup(p++);
	*length = be32_to_cpup(p++);
	xdr_decode_hyper(p, &offset);

	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}

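/* Decode a counted array of segments and return the total number
 * of bytes the chunk conveys.
 */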
static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}

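/* Decode an RDMA_MSG reply: parse and sanity-check the chunk lists
 * (a Reply chunk is not permitted here), then fix up the inline RPC
 * message payload into rqst->rq_rcv_buf. Returns the total number of
 * reply bytes, or a negative errno.
 */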
static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}

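/* Decode an RDMA_NOMSG reply: the entire RPC reply was conveyed via
 * the Reply chunk, so only the chunk lists need to be parsed and
 * sanity-checked here. Returns the Reply chunk length, or a negative
 * errno.
 */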
1192static noinline int
1193rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
1194{
1195 struct xdr_stream *xdr = &rep->rr_stream;
Chuck Lever264b0cd2017-08-03 14:30:27 -04001196 u32 writelist, replychunk;
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001197
Chuck Lever264b0cd2017-08-03 14:30:27 -04001198 /* Decode the chunk lists */
1199 if (decode_read_list(xdr))
1200 return -EIO;
1201 if (decode_write_list(xdr, &writelist))
1202 return -EIO;
1203 if (decode_reply_chunk(xdr, &replychunk))
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001204 return -EIO;
1205
Chuck Lever264b0cd2017-08-03 14:30:27 -04001206 /* RDMA_NOMSG sanity checks */
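	/* With RDMA_NOMSG the whole RPC reply arrives via the Reply
	 * chunk: the Write list must be empty and a Reply chunk is
	 * required.
	 */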
1207 if (unlikely(writelist))
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001208 return -EIO;
Chuck Lever264b0cd2017-08-03 14:30:27 -04001209 if (unlikely(!replychunk))
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001210 return -EIO;
1211
Chuck Lever264b0cd2017-08-03 14:30:27 -04001212 /* Reply chunk buffer already is the reply vector */
1213 r_xprt->rx_stats.total_rdma_reply += replychunk;
1214 return replychunk;
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001215}
1216
1217static noinline int
1218rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
1219 struct rpc_rqst *rqst)
1220{
1221 struct xdr_stream *xdr = &rep->rr_stream;
1222 __be32 *p;
1223
1224 p = xdr_inline_decode(xdr, sizeof(*p));
1225 if (unlikely(!p))
1226 return -EIO;
1227
1228 switch (*p) {
1229 case err_vers:
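		/* ERR_VERS reports the lowest and highest RPC-over-RDMA
		 * protocol versions the server supports.
		 */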
1230 p = xdr_inline_decode(xdr, 2 * sizeof(*p));
1231 if (!p)
1232 break;
Chuck Leverddbb3472018-12-19 10:59:39 -05001233 dprintk("RPC: %s: server reports "
1234 "version error (%u-%u), xid %08x\n", __func__,
1235 be32_to_cpup(p), be32_to_cpu(*(p + 1)),
1236 be32_to_cpu(rep->rr_xid));
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001237 break;
1238 case err_chunk:
Chuck Leverddbb3472018-12-19 10:59:39 -05001239 dprintk("RPC: %s: server reports "
1240 "header decoding error, xid %08x\n", __func__,
1241 be32_to_cpu(rep->rr_xid));
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001242 break;
1243 default:
Chuck Leverddbb3472018-12-19 10:59:39 -05001244 dprintk("RPC: %s: server reports "
1245 "unrecognized error %d, xid %08x\n", __func__,
1246 be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001247 }
1248
1249 r_xprt->rx_stats.bad_reply_count++;
1250 return -EREMOTEIO;
1251}
1252
Chuck Levere1352c92017-10-16 15:01:22 -04001253/* Perform XID lookup, reconstruction of the RPC reply, and
1254 * RPC completion while holding the queue lock to ensure
1255 * the rep, rqst, and rq_task pointers remain stable.
1256 */
1257void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
1258{
1259 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
1260 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1261 struct rpc_rqst *rqst = rep->rr_rqst;
Chuck Levere1352c92017-10-16 15:01:22 -04001262 int status;
1263
1264 xprt->reestablish_timeout = 0;
1265
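	/* rr_proc selects how the reply was conveyed: inline
	 * (RDMA_MSG), entirely within the Reply chunk (RDMA_NOMSG),
	 * or not at all because the server reported a transport
	 * error (RDMA_ERROR).
	 */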
1266 switch (rep->rr_proc) {
1267 case rdma_msg:
1268 status = rpcrdma_decode_msg(r_xprt, rep, rqst);
1269 break;
1270 case rdma_nomsg:
1271 status = rpcrdma_decode_nomsg(r_xprt, rep);
1272 break;
1273 case rdma_error:
1274 status = rpcrdma_decode_error(r_xprt, rep, rqst);
1275 break;
1276 default:
1277 status = -EIO;
1278 }
1279 if (status < 0)
1280 goto out_badheader;
1281
1282out:
Trond Myklebust75c84152018-08-31 10:21:00 -04001283 spin_lock(&xprt->queue_lock);
Chuck Levere1352c92017-10-16 15:01:22 -04001284 xprt_complete_rqst(rqst->rq_task, status);
1285 xprt_unpin_rqst(rqst);
Trond Myklebust75c84152018-08-31 10:21:00 -04001286 spin_unlock(&xprt->queue_lock);
Chuck Levere1352c92017-10-16 15:01:22 -04001287 return;
1288
1289/* If the incoming reply terminated a pending RPC, the next
1290 * RPC call will post a replacement receive buffer as it is
1291 * being marshaled.
1292 */
1293out_badheader:
Chuck Leverb4a7f912017-12-20 16:30:48 -05001294 trace_xprtrdma_reply_hdr(rep);
Chuck Levere1352c92017-10-16 15:01:22 -04001295 r_xprt->rx_stats.bad_reply_count++;
Chuck Levere1352c92017-10-16 15:01:22 -04001296 goto out;
1297}
1298
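/* kref release callback: invoked once every holder of rl_kref has
 * dropped its reference, meaning no completion path still needs this
 * req and the reply can safely be processed.
 */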
Chuck Lever0ab11522019-06-19 10:33:15 -04001299static void rpcrdma_reply_done(struct kref *kref)
Chuck Lever0ba6f372017-10-20 10:48:28 -04001300{
Chuck Lever0ab11522019-06-19 10:33:15 -04001301 struct rpcrdma_req *req =
1302 container_of(kref, struct rpcrdma_req, rl_kref);
Chuck Lever01bb35c2017-10-20 10:48:36 -04001303
Chuck Lever0ab11522019-06-19 10:33:15 -04001304 rpcrdma_complete_rqst(req->rl_reply);
Chuck Lever0ba6f372017-10-20 10:48:28 -04001305}
1306
Chuck Leverd8099fe2019-06-19 10:33:10 -04001307/**
1308 * rpcrdma_reply_handler - Process received RPC/RDMA messages
1309 * @rep: Incoming rpcrdma_rep object to process
Chuck Leverfe97b472015-10-24 17:27:10 -04001310 *
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001311 * Errors must result in the RPC task either being awakened, or
1312 * allowed to timeout, to discover the errors at that time.
1313 */
Chuck Leverd8f532d2017-10-16 15:01:30 -04001314void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001315{
Chuck Lever431af642017-06-08 11:52:20 -04001316 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
Chuck Lever431af642017-06-08 11:52:20 -04001317 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
Chuck Leverbe798f92017-10-16 15:01:39 -04001318 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001319 struct rpcrdma_req *req;
1320 struct rpc_rqst *rqst;
Chuck Leverbe798f92017-10-16 15:01:39 -04001321 u32 credits;
Chuck Lever5381e0e2017-10-16 15:01:14 -04001322 __be32 *p;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001323
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001324 /* Fixed transport header fields */
Chuck Lever5381e0e2017-10-16 15:01:14 -04001325 xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
Chuck Lever0ccc61b2019-02-11 11:24:05 -05001326 rep->rr_hdrbuf.head[0].iov_base, NULL);
Chuck Lever5381e0e2017-10-16 15:01:14 -04001327 p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
Chuck Lever96f87782017-08-03 14:30:03 -04001328 if (unlikely(!p))
Chuck Leverb0e178a2015-10-24 17:26:54 -04001329 goto out_shortreply;
Chuck Lever5381e0e2017-10-16 15:01:14 -04001330 rep->rr_xid = *p++;
1331 rep->rr_vers = *p++;
Chuck Leverbe798f92017-10-16 15:01:39 -04001332 credits = be32_to_cpu(*p++);
Chuck Lever5381e0e2017-10-16 15:01:14 -04001333 rep->rr_proc = *p++;
Chuck Leverb0e178a2015-10-24 17:26:54 -04001334
Chuck Lever5381e0e2017-10-16 15:01:14 -04001335 if (rep->rr_vers != rpcrdma_version)
Chuck Lever61433af2017-10-16 15:01:06 -04001336 goto out_badversion;
1337
Chuck Lever5381e0e2017-10-16 15:01:14 -04001338 if (rpcrdma_is_bcall(r_xprt, rep))
Chuck Lever41c8f702017-08-03 14:30:11 -04001339 return;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001340
Chuck Leverfe97b472015-10-24 17:27:10 -04001341 /* Match incoming rpcrdma_rep to an rpcrdma_req to
1342 * get context for handling any incoming chunks.
1343 */
Trond Myklebust75c84152018-08-31 10:21:00 -04001344 spin_lock(&xprt->queue_lock);
Chuck Lever5381e0e2017-10-16 15:01:14 -04001345 rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
Chuck Lever9590d082017-08-23 17:05:58 -04001346 if (!rqst)
1347 goto out_norqst;
1348 xprt_pin_rqst(rqst);
Trond Myklebust93bdcf92018-10-18 17:29:00 -04001349 spin_unlock(&xprt->queue_lock);
Chuck Leverbe798f92017-10-16 15:01:39 -04001350
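	/* The credit value in a reply tells the client how many RPC
	 * Calls the server is prepared to accept. Clamp it to a sane
	 * range and reflect it in the RPC layer's congestion window.
	 */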
1351 if (credits == 0)
1352 credits = 1; /* don't deadlock */
1353 else if (credits > buf->rb_max_requests)
1354 credits = buf->rb_max_requests;
Chuck Lever91ca1862018-10-01 14:25:09 -04001355 if (buf->rb_credits != credits) {
Trond Myklebustb5e92412019-05-02 11:21:08 -04001356 spin_lock(&xprt->transport_lock);
Chuck Lever91ca1862018-10-01 14:25:09 -04001357 buf->rb_credits = credits;
1358 xprt->cwnd = credits << RPC_CWNDSHIFT;
Trond Myklebustb5e92412019-05-02 11:21:08 -04001359 spin_unlock(&xprt->transport_lock);
Chuck Lever91ca1862018-10-01 14:25:09 -04001360 }
Chuck Leverbe798f92017-10-16 15:01:39 -04001361
Chuck Lever9590d082017-08-23 17:05:58 -04001362 req = rpcr_to_rdmar(rqst);
Chuck Lever07e10302018-12-07 11:11:44 -05001363 if (req->rl_reply) {
1364 trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
1365 rpcrdma_recv_buffer_put(req->rl_reply);
1366 }
Chuck Lever4b196dc62017-06-08 11:51:56 -04001367 req->rl_reply = rep;
Chuck Levere1352c92017-10-16 15:01:22 -04001368 rep->rr_rqst = rqst;
Chuck Lever431af642017-06-08 11:52:20 -04001369
Chuck Leverb4a7f912017-12-20 16:30:48 -05001370 trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
Chuck Leverd8099fe2019-06-19 10:33:10 -04001371
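	/* If the Receive completion shows the server performed Remote
	 * Invalidation, retire the MRs it already invalidated; any MR
	 * still registered must be invalidated locally before the RPC
	 * completes.
	 */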
1372 if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
1373 frwr_reminv(rep, &req->rl_registered);
Chuck Lever0ab11522019-06-19 10:33:15 -04001374 if (!list_empty(&req->rl_registered))
Chuck Leverd8099fe2019-06-19 10:33:10 -04001375 frwr_unmap_async(r_xprt, req);
1376 /* LocalInv completion will complete the RPC */
Chuck Lever0ab11522019-06-19 10:33:15 -04001377 else
1378 kref_put(&req->rl_kref, rpcrdma_reply_done);
Chuck Leverb0e178a2015-10-24 17:26:54 -04001379 return;
1380
Chuck Lever61433af2017-10-16 15:01:06 -04001381out_badversion:
Chuck Leverb4a7f912017-12-20 16:30:48 -05001382 trace_xprtrdma_reply_vers(rep);
Chuck Lever6ceea362018-12-19 10:58:24 -05001383 goto out;
Chuck Lever61433af2017-10-16 15:01:06 -04001384
Chuck Lever431af642017-06-08 11:52:20 -04001385out_norqst:
Trond Myklebust75c84152018-08-31 10:21:00 -04001386 spin_unlock(&xprt->queue_lock);
Chuck Leverb4a7f912017-12-20 16:30:48 -05001387 trace_xprtrdma_reply_rqst(rep);
Chuck Lever6ceea362018-12-19 10:58:24 -05001388 goto out;
Chuck Leverb0e178a2015-10-24 17:26:54 -04001389
Chuck Lever9590d082017-08-23 17:05:58 -04001390out_shortreply:
Chuck Leverb4a7f912017-12-20 16:30:48 -05001391 trace_xprtrdma_reply_short(rep);
Chuck Leverb0e178a2015-10-24 17:26:54 -04001392
Chuck Lever6ceea362018-12-19 10:58:24 -05001393out:
Chuck Lever7c8d9e72018-05-04 15:35:20 -04001394 rpcrdma_recv_buffer_put(rep);
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001395}