\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -04001/*
Chuck Lever62b56a62017-10-30 16:22:14 -04002 * Copyright (c) 2014-2017 Oracle. All rights reserved.
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04003 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the BSD-type
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 *
18 * Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials provided
21 * with the distribution.
22 *
23 * Neither the name of the Network Appliance, Inc. nor the names of
24 * its contributors may be used to endorse or promote products
25 * derived from this software without specific prior written
26 * permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 */
40
41/*
42 * rpc_rdma.c
43 *
44 * This file contains the guts of the RPC RDMA protocol, and
45 * does marshaling/unmarshaling, etc. It is also where interfacing
46 * to the Linux RPC framework lives.
\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -040047 */
48
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -040049#include <linux/highmem.h>
50
Chuck Leverb6e717cb2018-05-07 15:27:05 -040051#include "xprt_rdma.h"
52#include <trace/events/rpcrdma.h>
53
Jeff Laytonf895b252014-11-17 16:58:04 -050054#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -040055# define RPCDBG_FACILITY RPCDBG_TRANS
56#endif
57
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -040058static const char transfertypes[][12] = {
Chuck Lever94f58c52016-05-02 14:41:30 -040059 "inline", /* no chunks */
60 "read list", /* some argument via rdma read */
61 "*read list", /* entire request via rdma read */
62 "write list", /* some result via rdma write */
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -040063 "reply chunk" /* entire reply via rdma write */
64};
Chuck Lever302d3de2016-05-02 14:41:05 -040065
66/* Returns size of largest RPC-over-RDMA header in a Call message
67 *
Chuck Lever94f58c52016-05-02 14:41:30 -040068 * The largest Call header contains a full-size Read list and a
69 * minimal Reply chunk.
Chuck Lever302d3de2016-05-02 14:41:05 -040070 */
71static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
72{
73 unsigned int size;
74
75 /* Fixed header fields and list discriminators */
76 size = RPCRDMA_HDRLEN_MIN;
77
78 /* Maximum Read list size */
79 maxsegs += 2; /* segment for head and tail buffers */
Chuck Lever2232df52017-10-30 16:21:57 -040080 size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
Chuck Lever302d3de2016-05-02 14:41:05 -040081
Chuck Lever94f58c52016-05-02 14:41:30 -040082 /* Minimal Read chunk size */
83 size += sizeof(__be32); /* segment count */
Chuck Lever2232df52017-10-30 16:21:57 -040084 size += rpcrdma_segment_maxsz * sizeof(__be32);
Chuck Lever94f58c52016-05-02 14:41:30 -040085 size += sizeof(__be32); /* list discriminator */
86
Chuck Lever302d3de2016-05-02 14:41:05 -040087 dprintk("RPC: %s: max call header size = %u\n",
88 __func__, size);
89 return size;
90}
91
92/* Returns size of largest RPC-over-RDMA header in a Reply message
93 *
94 * There is only one Write list or one Reply chunk per Reply
95 * message. The larger list is the Write list.
96 */
97static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
98{
99 unsigned int size;
100
101 /* Fixed header fields and list discriminators */
102 size = RPCRDMA_HDRLEN_MIN;
103
104 /* Maximum Write list size */
105 maxsegs += 2; /* segment for head and tail buffers */
106 size = sizeof(__be32); /* segment count */
Chuck Lever2232df52017-10-30 16:21:57 -0400107 size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
Chuck Lever302d3de2016-05-02 14:41:05 -0400108 size += sizeof(__be32); /* list discriminator */
109
110 dprintk("RPC: %s: max reply header size = %u\n",
111 __func__, size);
112 return size;
113}
114
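/* Derive this transport's inline limits: the negotiated inline
 * send and receive sizes, less the worst-case Call and Reply
 * header sizes computed above.
 */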
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int maxsegs = ia->ri_max_segs;

	ia->ri_max_inline_write = cdata->inline_wsize -
				  rpcrdma_max_call_header_size(maxsegs);
	ia->ri_max_inline_read = cdata->inline_rsize -
				 rpcrdma_max_reply_header_size(maxsegs);
}

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}

/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_page = NULL;
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}

/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		if (unlikely(!*ppages)) {
			/* XXX: Certain upper layer operations do
			 * not provide receive buffer pages.
			 */
			*ppages = alloc_page(GFP_ATOMIC);
			if (!*ppages)
				return -EAGAIN;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}

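/* The Read list, Write list, and Reply chunk in the transport header
 * are each introduced by a one-word XDR discriminator: xdr_one marks
 * an item as present, xdr_zero marks it absent or terminates a list.
 */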
static inline int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static inline int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}

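/* Emit one RDMA segment in HLOO form: a 32-bit handle, a 32-bit
 * length, and a 64-bit offset, taken from the registered MR.
 */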
static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
	*iptr++ = cpu_to_be32(mr->mr_handle);
	*iptr++ = cpu_to_be32(mr->mr_length);
	xdr_encode_hyper(iptr, mr->mr_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;		/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mr);
	return 0;
}

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   false, &mr);
		if (IS_ERR(seg))
			goto out_maperr;
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_read_chunk(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	return 0;

out_maperr:
	if (PTR_ERR(seg) == -EAGAIN)
		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
	return PTR_ERR(seg);
}

/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   true, &mr);
		if (IS_ERR(seg))
			goto out_maperr;
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_write_chunk(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;

out_maperr:
	if (PTR_ERR(seg) == -EAGAIN)
		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
	return PTR_ERR(seg);
}

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   true, &mr);
		if (IS_ERR(seg))
			goto out_maperr;
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_reply_chunk(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;

out_maperr:
	if (PTR_ERR(seg) == -EAGAIN)
		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
	return PTR_ERR(seg);
}

/**
 * rpcrdma_unmap_sendctx - DMA-unmap Send buffers
 * @sc: sendctx containing SGEs to unmap
 *
 */
void
rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_ia *ia = &sc->sc_xprt->rx_ia;
	struct ib_sge *sge;
	unsigned int count;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	sge = &sc->sc_sges[2];
	for (count = sc->sc_unmap_count; count; ++sge, --count)
		ib_dma_unmap_page(ia->ri_device,
				  sge->addr, sge->length, DMA_TO_DEVICE);

	if (test_and_clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &sc->sc_req->rl_flags)) {
		smp_mb__after_atomic();
		wake_up_bit(&sc->sc_req->rl_flags, RPCRDMA_REQ_F_TX_RESOURCES);
	}
}

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = sc->sc_sges;

	if (!rpcrdma_dma_map_regbuf(ia, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
				      sge->length, DMA_TO_DEVICE);
	sc->sc_wr.num_sge++;
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}

/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */
static bool
rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_device *device = ia->ri_device;
	struct ib_sge *sge = sc->sc_sges;
	u32 lkey = ia->ri_pd->local_dma_lkey;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_dma_map_regbuf(ia, rb))
		goto out_regbuf;
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}

	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr = ib_dma_map_page(device, *ppages,
							   page_base, len,
							   DMA_TO_DEVICE);
			if (ib_dma_mapping_error(device, sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = lkey;

			sc->sc_unmap_count++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr = ib_dma_map_page(device, page,
						   page_base, len,
						   DMA_TO_DEVICE);
		if (ib_dma_mapping_error(device, sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = lkey;
		sc->sc_unmap_count++;
	}

out:
	sc->sc_wr.num_sge += sge_no;
	if (sc->sc_unmap_count)
		__set_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;

out_mapping_overflow:
	rpcrdma_unmap_sendctx(sc);
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	rpcrdma_unmap_sendctx(sc);
	pr_err("rpcrdma: Send mapping error\n");
	return false;
}

/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req, u32 hdrlen,
			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	req->rl_sendctx = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf);
	if (!req->rl_sendctx)
		return -ENOBUFS;
	req->rl_sendctx->sc_wr.num_sge = 0;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	__clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);

	if (!rpcrdma_prepare_hdr_sge(&r_xprt->rx_ia, req, hdrlen))
		return -EIO;

	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req, xdr, rtype))
			return -EIO;

	return 0;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (e.g., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if the caller should call again with the same arguments,
 *	%-ENOBUFS if the caller should call again after a delay,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf,
			req->rl_rdmabuf->rg_base);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* If this is a retransmit, discard previously registered
	 * chunks. Very likely the connection has been replaced,
	 * so these registrations are invalid and unusable.
	 */
	while (unlikely(!list_empty(&req->rl_registered))) {
		struct rpcrdma_mr *mr;

		mr = rpcrdma_mr_pop(&req->rl_registered);
		rpcrdma_mr_defer_recovery(mr);
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	if (rtype != rpcrdma_noch) {
		ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype == rpcrdma_writech) {
		ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype != rpcrdma_replych)
		ret = encode_item_not_present(xdr);
	else
		ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(rqst, xdr_stream_pos(xdr), rtype, wtype);

	ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
					&rqst->rq_snd_buf, rtype);
	if (ret)
		goto out_err;
	return 0;

out_err:
	r_xprt->rx_stats.failed_marshal_count++;
	return ret;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	trace_xprtrdma_fixup(rqst, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			trace_xprtrdma_fixup_pg(rqst, i, srcp,
						copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		xprt_disconnect_done(&r_xprt->rx_xprt);
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

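/* Decode one HLOO segment from a Write chunk or Reply chunk and
 * return its length. The handle and offset are consumed here and
 * reported only via the decode_seg trace point.
 */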
static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	handle = be32_to_cpup(p++);
	*length = be32_to_cpup(p++);
	xdr_decode_hyper(p, &offset);

	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}

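/* Decode a counted array of HLOO segments, summing the segment
 * lengths to yield the total number of bytes in the chunk.
 */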
static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

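/* The Reply chunk is optional: an xdr_zero discriminator means no
 * Reply chunk was provided. Otherwise decode it as a counted array
 * and return its total length.
 */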
static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}

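/* Handle an RDMA_MSG reply: the RPC reply message follows the
 * transport header inline. Write chunk payloads have already been
 * placed directly in the receive buffer by the responder, so only
 * the inline content needs to be fixed up here.
 */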
static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}

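/* Handle an RDMA_NOMSG reply: no RPC message follows the transport
 * header, so the entire reply must have been conveyed via the
 * Reply chunk.
 */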
static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}

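/* Handle an RDMA_ERROR reply: the responder could not parse or
 * process the transport header. Log the error and return
 * -EREMOTEIO so the caller can fail the RPC.
 */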
1191static noinline int
1192rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
1193 struct rpc_rqst *rqst)
1194{
1195 struct xdr_stream *xdr = &rep->rr_stream;
1196 __be32 *p;
1197
1198 p = xdr_inline_decode(xdr, sizeof(*p));
1199 if (unlikely(!p))
1200 return -EIO;
1201
1202 switch (*p) {
1203 case err_vers:
1204 p = xdr_inline_decode(xdr, 2 * sizeof(*p));
1205 if (!p)
1206 break;
1207 dprintk("RPC: %5u: %s: server reports version error (%u-%u)\n",
1208 rqst->rq_task->tk_pid, __func__,
1209 be32_to_cpup(p), be32_to_cpu(*(p + 1)));
1210 break;
1211 case err_chunk:
1212 dprintk("RPC: %5u: %s: server reports header decoding error\n",
1213 rqst->rq_task->tk_pid, __func__);
1214 break;
1215 default:
1216 dprintk("RPC: %5u: %s: server reports unrecognized error %d\n",
1217 rqst->rq_task->tk_pid, __func__, be32_to_cpup(p));
1218 }
1219
1220 r_xprt->rx_stats.bad_reply_count++;
1221 return -EREMOTEIO;
1222}
1223
Chuck Levere1352c92017-10-16 15:01:22 -04001224/* Perform XID lookup, reconstruction of the RPC reply, and
1225 * RPC completion while holding the transport lock to ensure
1226 * the rep, rqst, and rq_task pointers remain stable.
1227 */
1228void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
1229{
1230 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
1231 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1232 struct rpc_rqst *rqst = rep->rr_rqst;
1233 unsigned long cwnd;
1234 int status;
1235
1236 xprt->reestablish_timeout = 0;
1237
1238 switch (rep->rr_proc) {
1239 case rdma_msg:
1240 status = rpcrdma_decode_msg(r_xprt, rep, rqst);
1241 break;
1242 case rdma_nomsg:
1243 status = rpcrdma_decode_nomsg(r_xprt, rep);
1244 break;
1245 case rdma_error:
1246 status = rpcrdma_decode_error(r_xprt, rep, rqst);
1247 break;
1248 default:
1249 status = -EIO;
1250 }
1251 if (status < 0)
1252 goto out_badheader;
1253
1254out:
1255 spin_lock(&xprt->recv_lock);
1256 cwnd = xprt->cwnd;
Chuck Leverbe798f92017-10-16 15:01:39 -04001257 xprt->cwnd = r_xprt->rx_buf.rb_credits << RPC_CWNDSHIFT;
Chuck Levere1352c92017-10-16 15:01:22 -04001258 if (xprt->cwnd > cwnd)
1259 xprt_release_rqst_cong(rqst->rq_task);
1260
1261 xprt_complete_rqst(rqst->rq_task, status);
1262 xprt_unpin_rqst(rqst);
1263 spin_unlock(&xprt->recv_lock);
1264 return;
1265
1266/* If the incoming reply terminated a pending RPC, the next
1267 * RPC call will post a replacement receive buffer as it is
1268 * being marshaled.
1269 */
1270out_badheader:
Chuck Leverb4a7f912017-12-20 16:30:48 -05001271 trace_xprtrdma_reply_hdr(rep);
Chuck Levere1352c92017-10-16 15:01:22 -04001272 r_xprt->rx_stats.bad_reply_count++;
1273 status = -EIO;
1274 goto out;
1275}
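/* A note on the congestion-window update in the "out:" path above,
 * assuming the generic sunrpc definitions RPC_CWNDSHIFT = 8 and
 * RPC_CWNDSCALE = (1 << RPC_CWNDSHIFT): the credit grant carried in
 * the reply is converted to congestion-window units, i.e.
 *
 *	xprt->cwnd = rb_credits << RPC_CWNDSHIFT
 *		   = rb_credits * RPC_CWNDSCALE
 *
 * so 128 granted credits become a cwnd of 128 * 256 = 32768,
 * allowing up to 128 requests in flight.  When the window grows,
 * xprt_release_rqst_cong() releases this request's congestion slot
 * so another waiting request can proceed.
 */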
1276
Chuck Lever0ba6f372017-10-20 10:48:28 -04001277void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
1278{
1279 /* Invalidate and unmap the data payloads before waking
1280 * the waiting application. This guarantees the memory
1281 * regions are properly fenced from the server before the
1282 * application accesses the data. It also ensures proper
1283 * send flow control: waking the next RPC waits until this
1284 * RPC has relinquished all its Send Queue entries.
1285 */
1286 if (!list_empty(&req->rl_registered))
1287 r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
1288 &req->rl_registered);
Chuck Lever01bb35c2017-10-20 10:48:36 -04001289
1290 /* Ensure that any DMA mapped pages associated with
1291 * the Send of the RPC Call have been unmapped before
1292 * allowing the RPC to complete. This protects argument
1293 * memory not controlled by the RPC client from being
1294 * re-used before we're done with it.
1295 */
1296 if (test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
1297 r_xprt->rx_stats.reply_waits_for_send++;
1298 out_of_line_wait_on_bit(&req->rl_flags,
1299 RPCRDMA_REQ_F_TX_RESOURCES,
1300 bit_wait,
1301 TASK_UNINTERRUPTIBLE);
1302 }
Chuck Lever0ba6f372017-10-20 10:48:28 -04001303}
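/* The wait above pairs with the Send completion path, which lives
 * outside this file.  A minimal sketch of what that path is assumed
 * to do once the Send WR's resources can be reused:
 *
 *	clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&req->rl_flags, RPCRDMA_REQ_F_TX_RESOURCES);
 *
 * The bit must be cleared, with a barrier, before wake_up_bit() so
 * that the waiter in rpcrdma_release_rqst() observes the bit clear
 * when it wakes.
 */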
1304
Chuck Leverd8f532d2017-10-16 15:01:30 -04001305/* Reply handling runs in the poll worker thread. Anything that
1306 * might wait is deferred to a separate workqueue.
1307 */
1308void rpcrdma_deferred_completion(struct work_struct *work)
1309{
1310 struct rpcrdma_rep *rep =
1311 container_of(work, struct rpcrdma_rep, rr_work);
1312 struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
Chuck Leverc3441612017-12-14 20:56:26 -05001313 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
Chuck Leverd8f532d2017-10-16 15:01:30 -04001314
Chuck Leverb4a7f912017-12-20 16:30:48 -05001315 trace_xprtrdma_defer_cmp(rep);
Chuck Leverc3441612017-12-14 20:56:26 -05001316 if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
1317 r_xprt->rx_ia.ri_ops->ro_reminv(rep, &req->rl_registered);
1318 rpcrdma_release_rqst(r_xprt, req);
Chuck Leverd8f532d2017-10-16 15:01:30 -04001319 rpcrdma_complete_rqst(rep);
1320}
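/* For this handler to run, rep->rr_work must have been bound to it
 * when the rpcrdma_rep was allocated, presumably (in the rep setup
 * code, outside this file) along the lines of:
 *
 *	INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
 *
 * rpcrdma_reply_handler() below then only needs queue_work() to
 * hand the reply off to process context, where waiting is allowed.
 */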
1321
Chuck Leverfe97b472015-10-24 17:27:10 -04001322/* Process received RPC/RDMA messages.
1323 *
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001324 * Errors must result in the RPC task either being awakened, or
 1325 * allowed to time out, so that the error is discovered at that point.
1326 */
Chuck Leverd8f532d2017-10-16 15:01:30 -04001327void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001328{
Chuck Lever431af642017-06-08 11:52:20 -04001329 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
Chuck Lever431af642017-06-08 11:52:20 -04001330 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
Chuck Leverbe798f92017-10-16 15:01:39 -04001331 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001332 struct rpcrdma_req *req;
1333 struct rpc_rqst *rqst;
Chuck Leverbe798f92017-10-16 15:01:39 -04001334 u32 credits;
Chuck Lever5381e0e2017-10-16 15:01:14 -04001335 __be32 *p;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001336
Chuck Levere2a67192017-08-03 14:30:44 -04001337 if (rep->rr_hdrbuf.head[0].iov_len == 0)
Chuck Leverb0e178a2015-10-24 17:26:54 -04001338 goto out_badstatus;
Chuck Leverb0e178a2015-10-24 17:26:54 -04001339
Chuck Lever5381e0e2017-10-16 15:01:14 -04001340 xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
Chuck Lever96f87782017-08-03 14:30:03 -04001341 rep->rr_hdrbuf.head[0].iov_base);
1342
1343 /* Fixed transport header fields */
Chuck Lever5381e0e2017-10-16 15:01:14 -04001344 p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
Chuck Lever96f87782017-08-03 14:30:03 -04001345 if (unlikely(!p))
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001346 goto out_shortreply;
Chuck Lever5381e0e2017-10-16 15:01:14 -04001347 rep->rr_xid = *p++;
1348 rep->rr_vers = *p++;
Chuck Leverbe798f92017-10-16 15:01:39 -04001349 credits = be32_to_cpu(*p++);
Chuck Lever5381e0e2017-10-16 15:01:14 -04001350 rep->rr_proc = *p++;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001351
Chuck Lever5381e0e2017-10-16 15:01:14 -04001352 if (rep->rr_vers != rpcrdma_version)
Chuck Lever61433af2017-10-16 15:01:06 -04001353 goto out_badversion;
1354
Chuck Lever5381e0e2017-10-16 15:01:14 -04001355 if (rpcrdma_is_bcall(r_xprt, rep))
Chuck Lever41c8f702017-08-03 14:30:11 -04001356 return;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001357
Chuck Leverfe97b472015-10-24 17:27:10 -04001358 /* Match incoming rpcrdma_rep to an rpcrdma_req to
1359 * get context for handling any incoming chunks.
1360 */
Chuck Lever9590d082017-08-23 17:05:58 -04001361 spin_lock(&xprt->recv_lock);
Chuck Lever5381e0e2017-10-16 15:01:14 -04001362 rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
Chuck Lever9590d082017-08-23 17:05:58 -04001363 if (!rqst)
1364 goto out_norqst;
1365 xprt_pin_rqst(rqst);
Chuck Leverbe798f92017-10-16 15:01:39 -04001366
1367 if (credits == 0)
1368 credits = 1; /* don't deadlock */
1369 else if (credits > buf->rb_max_requests)
1370 credits = buf->rb_max_requests;
1371 buf->rb_credits = credits;
1372
Chuck Lever9590d082017-08-23 17:05:58 -04001373 spin_unlock(&xprt->recv_lock);
Chuck Leverbe798f92017-10-16 15:01:39 -04001374
Chuck Lever9590d082017-08-23 17:05:58 -04001375 req = rpcr_to_rdmar(rqst);
Chuck Lever4b196dc62017-06-08 11:51:56 -04001376 req->rl_reply = rep;
Chuck Levere1352c92017-10-16 15:01:22 -04001377 rep->rr_rqst = rqst;
Chuck Lever0ba6f372017-10-20 10:48:28 -04001378 clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
Chuck Lever431af642017-06-08 11:52:20 -04001379
Chuck Leverb4a7f912017-12-20 16:30:48 -05001380 trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001381
Chuck Lever6720a892018-02-28 15:30:27 -05001382 queue_work(rpcrdma_receive_wq, &rep->rr_work);
Chuck Leverb0e178a2015-10-24 17:26:54 -04001383 return;
1384
1385out_badstatus:
1386 rpcrdma_recv_buffer_put(rep);
1387 if (r_xprt->rx_ep.rep_connected == 1) {
1388 r_xprt->rx_ep.rep_connected = -EIO;
1389 rpcrdma_conn_func(&r_xprt->rx_ep);
1390 }
1391 return;
1392
Chuck Lever61433af2017-10-16 15:01:06 -04001393out_badversion:
Chuck Leverb4a7f912017-12-20 16:30:48 -05001394 trace_xprtrdma_reply_vers(rep);
Chuck Lever61433af2017-10-16 15:01:06 -04001395 goto repost;
1396
Chuck Levere1352c92017-10-16 15:01:22 -04001397/* The RPC transaction has already been terminated, or the header
1398 * is corrupt.
Chuck Lever59aa1f92016-03-04 11:28:18 -05001399 */
Chuck Lever431af642017-06-08 11:52:20 -04001400out_norqst:
Trond Myklebustce7c2522017-08-16 15:30:35 -04001401 spin_unlock(&xprt->recv_lock);
Chuck Leverb4a7f912017-12-20 16:30:48 -05001402 trace_xprtrdma_reply_rqst(rep);
Chuck Leverb0e178a2015-10-24 17:26:54 -04001403 goto repost;
1404
Chuck Lever9590d082017-08-23 17:05:58 -04001405out_shortreply:
Chuck Leverb4a7f912017-12-20 16:30:48 -05001406 trace_xprtrdma_reply_short(rep);
Chuck Leverb0e178a2015-10-24 17:26:54 -04001407
Chuck Lever431af642017-06-08 11:52:20 -04001408/* If no pending RPC transaction was matched, post a replacement
1409 * receive buffer before returning.
1410 */
Chuck Leverb0e178a2015-10-24 17:26:54 -04001411repost:
1412 r_xprt->rx_stats.bad_reply_count++;
Chuck Leverb1573802016-09-15 10:56:35 -04001413 if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
Chuck Leverb0e178a2015-10-24 17:26:54 -04001414 rpcrdma_recv_buffer_put(rep);
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001415}
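/* The four fixed words decoded at the top of rpcrdma_reply_handler()
 * form the RPC-over-RDMA version 1 transport header.  A sketch of
 * the on-the-wire layout (per RFC 8166; the struct is illustrative,
 * the code above decodes the words individually):
 *
 *	struct rpcrdma1_hdr {
 *		__be32	rdma_xid;	matches the RPC XID
 *		__be32	rdma_vers;	must equal rpcrdma_version
 *		__be32	rdma_credit;	credit grant from the responder
 *		__be32	rdma_proc;	rdma_msg, rdma_nomsg or rdma_error
 *	};
 *
 * A credit grant of zero is clamped to one so the connection cannot
 * deadlock, and grants above rb_max_requests are clamped down to
 * that limit before being stored in rb_credits.
 */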