// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include <linux/highmem.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC:       %s: max call header size = %u\n",
		__func__, size);
	return size;
}
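
/* Worked example (a sketch; assumes the usual xprt_rdma.h values:
 * RPCRDMA_HDRLEN_MIN = 28 bytes, rpcrdma_readchunk_maxsz = 6 XDR
 * words, rpcrdma_segment_maxsz = 4 XDR words): with maxsegs = 8,
 * the Read list can occupy 8 * 6 * 4 = 192 bytes and the minimal
 * Reply chunk adds 4 + 16 + 4 = 24 bytes, so the largest Call
 * header is 28 + 192 + 24 = 244 bytes. This is the amount
 * subtracted from the inline threshold in
 * rpcrdma_set_max_header_sizes() below.
 */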

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	size += sizeof(__be32);		/* segment count */
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC:       %s: max reply header size = %u\n",
		__func__, size);
	return size;
}

void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int maxsegs = ia->ri_max_segs;

	ia->ri_max_inline_write = cdata->inline_wsize -
				  rpcrdma_max_call_header_size(maxsegs);
	ia->ri_max_inline_read = cdata->inline_rsize -
				 rpcrdma_max_reply_header_size(maxsegs);
}
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400119
Chuck Lever5457ced2015-08-03 13:03:49 -0400120/* The client can send a request inline as long as the RPCRDMA header
121 * plus the RPC call fit under the transport's inline limit. If the
122 * combined call message size exceeds that limit, the client must use
Chuck Lever16f906d2017-02-08 17:00:10 -0500123 * a Read chunk for this operation.
124 *
125 * A Read chunk is also required if sending the RPC call inline would
126 * exceed this device's max_sge limit.
Chuck Lever5457ced2015-08-03 13:03:49 -0400127 */
Chuck Lever302d3de2016-05-02 14:41:05 -0400128static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
129 struct rpc_rqst *rqst)
Chuck Lever5457ced2015-08-03 13:03:49 -0400130{
Chuck Lever16f906d2017-02-08 17:00:10 -0500131 struct xdr_buf *xdr = &rqst->rq_snd_buf;
132 unsigned int count, remaining, offset;
Chuck Lever5457ced2015-08-03 13:03:49 -0400133
Chuck Lever16f906d2017-02-08 17:00:10 -0500134 if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
135 return false;
136
137 if (xdr->page_len) {
138 remaining = xdr->page_len;
Chuck Leverd933cc32017-06-08 11:53:16 -0400139 offset = offset_in_page(xdr->page_base);
Chuck Lever1179e2c2018-01-31 12:34:05 -0500140 count = RPCRDMA_MIN_SEND_SGES;
Chuck Lever16f906d2017-02-08 17:00:10 -0500141 while (remaining) {
142 remaining -= min_t(unsigned int,
143 PAGE_SIZE - offset, remaining);
144 offset = 0;
145 if (++count > r_xprt->rx_ia.ri_max_send_sges)
146 return false;
147 }
148 }
149
150 return true;
Chuck Lever5457ced2015-08-03 13:03:49 -0400151}
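
/* A sketch of the SGE accounting above (assumes PAGE_SIZE = 4096):
 * a page list of 6000 bytes starting at page offset 2048 covers
 * 4096 - 2048 = 2048 bytes of its first page and spills 3952 bytes
 * into a second page, so it consumes two SGEs beyond the
 * RPCRDMA_MIN_SEND_SGES already counted (by assumption, reserved
 * for the transport header, head, and tail). If the total exceeds
 * ri_max_send_sges, the request must go as a Read chunk instead.
 */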

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}

/* The client is required to provide a Reply chunk if the maximum
 * size of the non-payload part of the RPC Reply is larger than
 * the inline threshold.
 */
static bool
rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
			  const struct rpc_rqst *rqst)
{
	const struct xdr_buf *buf = &rqst->rq_rcv_buf;
	const struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return buf->head[0].iov_len + buf->tail[0].iov_len <
		ia->ri_max_inline_read;
}

/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_page = NULL;
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}
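
/* For example (a sketch, assuming PAGE_SIZE = 4096): a 6000-byte
 * kvec whose iov_base is 100 bytes into a page is split into two
 * segments, one of 4096 - 100 = 3996 bytes and one of 2004 bytes,
 * because each segment must stay within a single page.
 */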

/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge.
		 */
		if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
			if (!*ppages)
				*ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (!*ppages)
				return -ENOBUFS;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}

static inline int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static inline int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}

static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
	*iptr++ = cpu_to_be32(mr->mr_handle);
	*iptr++ = cpu_to_be32(mr->mr_length);
	xdr_encode_hyper(iptr, mr->mr_offset);
}
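
/* On the wire, a plain segment is four XDR words (RFC 8166's
 * rdma_segment):
 *
 *	+------------+------------+-------------------------+
 *	|  handle32  |  length32  |         offset64        |
 *	+------------+------------+-------------------------+
 *
 * which is the "HLOO" notation used in the encoding keys below.
 */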

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;			/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mr);
	return 0;
}
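
/* A Read list entry adds a presence discriminator and an XDR
 * position to the plain segment, for six XDR words in all:
 *
 *	1 | position32 | handle32 | length32 | offset64
 *
 * The position says where in the Call's XDR stream the chunk's
 * payload logically belongs.
 */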

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = frwr_map(r_xprt, seg, nsegs, false, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	return 0;
}
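
/* For instance, if frwr_map() produced two MRs for a read chunk at
 * position 140 (a sketch; the handle, length, and offset values are
 * invented for illustration), the encoded Read list would be:
 *
 *	1, 140, H1, L1, O1, 1, 140, H2, L2, O2, 0
 *
 * where the terminating 0 is emitted later by the
 * encode_item_not_present() call in rpcrdma_marshal_req().
 */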

/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

/**
 * rpcrdma_unmap_sendctx - DMA-unmap Send buffers
 * @sc: sendctx containing SGEs to unmap
 *
 */
void
rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_ia *ia = &sc->sc_xprt->rx_ia;
	struct ib_sge *sge;
	unsigned int count;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	sge = &sc->sc_sges[2];
	for (count = sc->sc_unmap_count; count; ++sge, --count)
		ib_dma_unmap_page(ia->ri_device,
				  sge->addr, sge->length, DMA_TO_DEVICE);

	if (test_and_clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &sc->sc_req->rl_flags)) {
		smp_mb__after_atomic();
		wake_up_bit(&sc->sc_req->rl_flags, RPCRDMA_REQ_F_TX_RESOURCES);
	}
}

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req, u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = sc->sc_sges;

	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
	sc->sc_wr.num_sge++;
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400561
Chuck Lever655fec62016-09-15 10:57:24 -0400562/* Prepare the Send SGEs. The head and tail iovec, and each entry
563 * in the page list, gets its own SGE.
564 */
Chuck Leverd2832af2019-04-24 09:39:32 -0400565static bool rpcrdma_prepare_msg_sges(struct rpcrdma_xprt *r_xprt,
566 struct rpcrdma_req *req,
567 struct xdr_buf *xdr,
568 enum rpcrdma_chunktype rtype)
Chuck Lever655fec62016-09-15 10:57:24 -0400569{
Chuck Leverae729502017-10-20 10:48:12 -0400570 struct rpcrdma_sendctx *sc = req->rl_sendctx;
Chuck Lever655fec62016-09-15 10:57:24 -0400571 unsigned int sge_no, page_base, len, remaining;
572 struct rpcrdma_regbuf *rb = req->rl_sendbuf;
Chuck Leverae729502017-10-20 10:48:12 -0400573 struct ib_sge *sge = sc->sc_sges;
Chuck Lever655fec62016-09-15 10:57:24 -0400574 struct page *page, **ppages;
Tom Talpeyb38ab402009-03-11 14:37:55 -0400575
Chuck Lever655fec62016-09-15 10:57:24 -0400576 /* The head iovec is straightforward, as it is already
577 * DMA-mapped. Sync the content that has changed.
578 */
Chuck Leverd2832af2019-04-24 09:39:32 -0400579 if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
Chuck Lever857f9ac2017-10-20 10:47:55 -0400580 goto out_regbuf;
Chuck Lever655fec62016-09-15 10:57:24 -0400581 sge_no = 1;
582 sge[sge_no].addr = rdmab_addr(rb);
583 sge[sge_no].length = xdr->head[0].iov_len;
584 sge[sge_no].lkey = rdmab_lkey(rb);
Chuck Lever91a10c52017-04-11 13:23:02 -0400585 ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
Chuck Lever655fec62016-09-15 10:57:24 -0400586 sge[sge_no].length, DMA_TO_DEVICE);
587
588 /* If there is a Read chunk, the page list is being handled
589 * via explicit RDMA, and thus is skipped here. However, the
590 * tail iovec may include an XDR pad for the page list, as
591 * well as additional content, and may not reside in the
592 * same page as the head iovec.
593 */
594 if (rtype == rpcrdma_readch) {
595 len = xdr->tail[0].iov_len;
596
597 /* Do not include the tail if it is only an XDR pad */
598 if (len < 4)
599 goto out;
600
601 page = virt_to_page(xdr->tail[0].iov_base);
Chuck Leverd933cc32017-06-08 11:53:16 -0400602 page_base = offset_in_page(xdr->tail[0].iov_base);
Chuck Lever655fec62016-09-15 10:57:24 -0400603
604 /* If the content in the page list is an odd length,
605 * xdr_write_pages() has added a pad at the beginning
606 * of the tail iovec. Force the tail's non-pad content
607 * to land at the next XDR position in the Send message.
608 */
609 page_base += len & 3;
610 len -= len & 3;
611 goto map_tail;
612 }
613
614 /* If there is a page list present, temporarily DMA map
615 * and prepare an SGE for each page to be sent.
616 */
617 if (xdr->page_len) {
618 ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
Chuck Leverd933cc32017-06-08 11:53:16 -0400619 page_base = offset_in_page(xdr->page_base);
Chuck Lever655fec62016-09-15 10:57:24 -0400620 remaining = xdr->page_len;
621 while (remaining) {
622 sge_no++;
623 if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
624 goto out_mapping_overflow;
625
626 len = min_t(u32, PAGE_SIZE - page_base, remaining);
Chuck Leverd2832af2019-04-24 09:39:32 -0400627 sge[sge_no].addr =
628 ib_dma_map_page(rdmab_device(rb), *ppages,
629 page_base, len, DMA_TO_DEVICE);
630 if (ib_dma_mapping_error(rdmab_device(rb),
631 sge[sge_no].addr))
Chuck Lever655fec62016-09-15 10:57:24 -0400632 goto out_mapping_err;
633 sge[sge_no].length = len;
Chuck Leverd2832af2019-04-24 09:39:32 -0400634 sge[sge_no].lkey = rdmab_lkey(rb);
Chuck Lever655fec62016-09-15 10:57:24 -0400635
Chuck Leverae729502017-10-20 10:48:12 -0400636 sc->sc_unmap_count++;
Chuck Lever655fec62016-09-15 10:57:24 -0400637 ppages++;
638 remaining -= len;
639 page_base = 0;
Tom Talpeyb38ab402009-03-11 14:37:55 -0400640 }
Tom Talpeyb38ab402009-03-11 14:37:55 -0400641 }
Tom Tuckerbd7ea312011-02-09 19:45:28 +0000642
Chuck Lever655fec62016-09-15 10:57:24 -0400643 /* The tail iovec is not always constructed in the same
644 * page where the head iovec resides (see, for example,
645 * gss_wrap_req_priv). To neatly accommodate that case,
646 * DMA map it separately.
647 */
648 if (xdr->tail[0].iov_len) {
649 page = virt_to_page(xdr->tail[0].iov_base);
Chuck Leverd933cc32017-06-08 11:53:16 -0400650 page_base = offset_in_page(xdr->tail[0].iov_base);
Chuck Lever655fec62016-09-15 10:57:24 -0400651 len = xdr->tail[0].iov_len;
652
653map_tail:
654 sge_no++;
Chuck Leverd2832af2019-04-24 09:39:32 -0400655 sge[sge_no].addr =
656 ib_dma_map_page(rdmab_device(rb), page, page_base, len,
657 DMA_TO_DEVICE);
658 if (ib_dma_mapping_error(rdmab_device(rb), sge[sge_no].addr))
Chuck Lever655fec62016-09-15 10:57:24 -0400659 goto out_mapping_err;
660 sge[sge_no].length = len;
Chuck Leverd2832af2019-04-24 09:39:32 -0400661 sge[sge_no].lkey = rdmab_lkey(rb);
Chuck Leverae729502017-10-20 10:48:12 -0400662 sc->sc_unmap_count++;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400663 }
Chuck Lever655fec62016-09-15 10:57:24 -0400664
665out:
Chuck Leverae729502017-10-20 10:48:12 -0400666 sc->sc_wr.num_sge += sge_no;
Chuck Lever01bb35c2017-10-20 10:48:36 -0400667 if (sc->sc_unmap_count)
668 __set_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);
Chuck Lever655fec62016-09-15 10:57:24 -0400669 return true;
670
Chuck Lever857f9ac2017-10-20 10:47:55 -0400671out_regbuf:
672 pr_err("rpcrdma: failed to DMA map a Send buffer\n");
673 return false;
674
Chuck Lever655fec62016-09-15 10:57:24 -0400675out_mapping_overflow:
Chuck Leverae729502017-10-20 10:48:12 -0400676 rpcrdma_unmap_sendctx(sc);
Chuck Lever655fec62016-09-15 10:57:24 -0400677 pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
678 return false;
679
680out_mapping_err:
Chuck Leverae729502017-10-20 10:48:12 -0400681 rpcrdma_unmap_sendctx(sc);
Chuck Lever53b2c1c2018-12-19 11:00:06 -0500682 trace_xprtrdma_dma_maperr(sge[sge_no].addr);
Chuck Lever655fec62016-09-15 10:57:24 -0400683 return false;
684}

/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req, u32 hdrlen,
			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	req->rl_sendctx = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf);
	if (!req->rl_sendctx)
		return -EAGAIN;
	req->rl_sendctx->sc_wr.num_sge = 0;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	__clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);

	if (!rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen))
		return -EIO;

	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(r_xprt, req, xdr, rtype))
			return -EIO;

	return 0;
}
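
/* The resulting SGE array for a typical inline Send looks like:
 *
 *	sc_sges[0]    - transport header (always mapped, rl_rdmabuf)
 *	sc_sges[1]    - RPC Call head iovec (always mapped, rl_sendbuf)
 *	sc_sges[2..n] - page list pages, DMA-mapped above and unmapped
 *	                by rpcrdma_unmap_sendctx() after Send completion
 *	sc_sges[n+1]  - tail iovec, if any, mapped and unmapped likewise
 */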

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if the caller should call again with the same arguments,
 *	%-ENOBUFS if the caller should call again after a delay,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
			rqst);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
		 rpcrdma_nonpayload_inline(r_xprt, rqst))
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* If this is a retransmit, discard previously registered
	 * chunks. Very likely the connection has been replaced,
	 * so these registrations are invalid and unusable.
	 */
	while (unlikely(!list_empty(&req->rl_registered))) {
		struct rpcrdma_mr *mr;

		mr = rpcrdma_mr_pop(&req->rl_registered);
		rpcrdma_mr_recycle(mr);
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	if (rtype != rpcrdma_noch) {
		ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype == rpcrdma_writech) {
		ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype != rpcrdma_replych)
		ret = encode_item_not_present(xdr);
	else
		ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(rqst, xdr_stream_pos(xdr), rtype, wtype);

	ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
					&rqst->rq_snd_buf, rtype);
	if (ret)
		goto out_err;
	return 0;

out_err:
	trace_xprtrdma_marshal_failed(rqst, ret);
	switch (ret) {
	case -EAGAIN:
		xprt_wait_for_buffer_space(rqst->rq_xprt);
		break;
	case -ENOBUFS:
		break;
	default:
		r_xprt->rx_stats.failed_marshal_count++;
	}
	return ret;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	trace_xprtrdma_fixup(rqst, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			trace_xprtrdma_fixup_pg(rqst, i, srcp,
						copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	handle = be32_to_cpup(p++);
	*length = be32_to_cpup(p++);
	xdr_decode_hyper(p, &offset);

	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}
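
/* Decode-side view of the same wire format (a sketch): a Reply
 * carrying one two-segment Write chunk and no Reply chunk looks like
 *
 *	xid | 1 | credits | RDMA_MSG | 0 | 1, 2, HLOO, HLOO, 0 | 0
 *
 * where the first 0 is the empty Read list, the 0 after the two
 * segments closes the Write list, and the final 0 marks the Reply
 * chunk as absent.
 */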

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC:       %s: server reports "
			"version error (%u-%u), xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(*(p + 1)),
			be32_to_cpu(rep->rr_xid));
		break;
	case err_chunk:
		dprintk("RPC:       %s: server reports "
			"header decoding error, xid %08x\n", __func__,
			be32_to_cpu(rep->rr_xid));
		break;
	default:
		dprintk("RPC:       %s: server reports "
			"unrecognized error %d, xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
	}

	r_xprt->rx_stats.bad_reply_count++;
	return -EREMOTEIO;
}
1224
/* Perform XID lookup, reconstruction of the RPC reply, and
 * RPC completion while holding the transport's queue lock to
 * ensure the rep, rqst, and rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	int status;

	xprt->reestablish_timeout = 0;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);
	return;

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badheader:
	trace_xprtrdma_reply_hdr(rep);
	r_xprt->rx_stats.bad_reply_count++;
	goto out;
}

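/* Release resources that were set up to send one RPC Call. Both
 * the MRs registered for bulk payloads and the DMA mappings
 * backing the Send must be quiesced before the RPC is allowed
 * to complete.
 */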
void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	/* Invalidate and unmap the data payloads before waking
	 * the waiting application. This guarantees the memory
	 * regions are properly fenced from the server before the
	 * application accesses the data. It also ensures proper
	 * send flow control: waking the next RPC waits until this
	 * RPC has relinquished all its Send Queue entries.
	 */
	if (!list_empty(&req->rl_registered))
		frwr_unmap_sync(r_xprt, &req->rl_registered);

	/* Ensure that any DMA mapped pages associated with
	 * the Send of the RPC Call have been unmapped before
	 * allowing the RPC to complete. This protects argument
	 * memory not controlled by the RPC client from being
	 * re-used before we're done with it.
	 */
	if (test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		r_xprt->rx_stats.reply_waits_for_send++;
		out_of_line_wait_on_bit(&req->rl_flags,
					RPCRDMA_REQ_F_TX_RESOURCES,
					bit_wait,
					TASK_UNINTERRUPTIBLE);
	}
}

/* Reply handling runs in the poll worker thread. Anything that
 * might wait is deferred to a separate workqueue.
 */
void rpcrdma_deferred_completion(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);
	struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	trace_xprtrdma_defer_cmp(rep);
	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		frwr_reminv(rep, &req->rl_registered);
	rpcrdma_release_rqst(r_xprt, req);
	rpcrdma_complete_rqst(rep);
}

/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	/* Fixed transport header fields */
	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base, NULL);
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->queue_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);

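	/* Apply the server's credit grant: each credit permits one
	 * outstanding Call. Clamp the grant to a sane range, then
	 * propagate it to the RPC layer as a congestion window so
	 * that no more than that many requests are in flight.
	 */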
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buf->rb_max_requests)
		credits = buf->rb_max_requests;
	if (buf->rb_credits != credits) {
		spin_lock_bh(&xprt->transport_lock);
		buf->rb_credits = credits;
		xprt->cwnd = credits << RPC_CWNDSHIFT;
		spin_unlock_bh(&xprt->transport_lock);
	}

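	/* A reply buffer still attached to this request would be
	 * leaked when it is replaced below; trace the anomaly and
	 * return that buffer before attaching the new one.
	 */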
	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
		rpcrdma_recv_buffer_put(req->rl_reply);
	}
	req->rl_reply = rep;
	rep->rr_rqst = rqst;
	clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);

	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
	queue_work(buf->rb_completion_wq, &rep->rr_work);
	return;

out_badversion:
	trace_xprtrdma_reply_vers(rep);
	goto out;

out_norqst:
	spin_unlock(&xprt->queue_lock);
	trace_xprtrdma_reply_rqst(rep);
	goto out;

out_shortreply:
	trace_xprtrdma_reply_short(rep);

out:
	rpcrdma_recv_buffer_put(rep);
}