\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -04001/*
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04002 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the BSD-type
8 * license below:
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 *
17 * Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
21 *
22 * Neither the name of the Network Appliance, Inc. nor the names of
23 * its contributors may be used to endorse or promote products
24 * derived from this software without specific prior written
25 * permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */
39
40/*
41 * rpc_rdma.c
42 *
43 * This file contains the guts of the RPC RDMA protocol, and
44 * does marshaling/unmarshaling, etc. It is also where interfacing
45 * to the Linux RPC framework lives.
\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -040046 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf for representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;
	int page_base;
	struct page **ppages;

	if (pos == 0 && xdrbuf->head[0].iov_len) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->head[0].iov_base;
		seg[n].mr_len = xdrbuf->head[0].iov_len;
		++n;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	while (len && n < nsegs) {
		if (!ppages[p]) {
			/* allocate a page for the receive buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -ENOMEM;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			return -EIO;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == nsegs)
		return -EIO;

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		if (n == nsegs)
			/* Tail remains, but we're out of segments */
			return -EIO;
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
		seg[n].mr_len = xdrbuf->tail[0].iov_len;
		++n;
	}

	return n;
}
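
/* Conversion sketch (illustrative sizes only): for an xdr_buf with a
 * 100-byte head, 8192 bytes of page data starting at page offset 0,
 * and an empty tail, the head becomes seg[0] (mr_page == NULL,
 * mr_len == 100), and the page data becomes seg[1] and seg[2], one
 * PAGE_SIZE-sized segment per page on a 4KB-page system; the routine
 * would return 3.
 */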

/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 * Assume check against THRESHOLD has been done, and chunks are required.
 * Assume only encoding one list entry for read|write chunks. The NFSv3
 * protocol is simple enough to allow this as it only has a single "bulk
 * result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 * RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * When used for a single reply chunk (which is a special write
 * chunk used for the entire reply, rather than just the data), it
 * is used primarily for READDIR and READLINK which would otherwise
 * be severely size-limited by a small rdma inline read max. The server
 * response will come back as an RDMA Write, followed by a message
 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
 * chunks do not provide data alignment; however, they do not require
 * "fixup" (moving the response to the upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */

static ssize_t
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
	struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int n, nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;
	int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool);

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs < 0)
		return nsegs;

	map = r_xprt->rx_ia.ri_ops->ro_map;
	do {
		n = map(r_xprt, seg, nsegs, cur_wchunk != NULL);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = cpu_to_be32(pos);
			cur_rchunk->rc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = cpu_to_be32(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero;	/* finish the write chunk list */
			*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
		return n;

	for (pos = 0; nchunks--;)
		pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
						      &req->rl_segments[pos]);
	return n;
}

/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
static int
rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;
	/*
	 * Do optional padding where it makes sense. Alignment of write
	 * payload can help the server, if our setting is accurate.
	 */
	pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
	if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
		pad = 0;	/* don't pad this request */

	dprintk("RPC: %s: pad %d destp 0x%p len %d hdrlen %d\n",
		__func__, pad, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC: %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	r_xprt->rx_stats.pullup_copy_count += copy_len;

	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp+page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
		page_base = 0;
	}
	/* header now contains entire send message */
	return pad;
}
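
/* Pullup sketch (illustrative sizes): for a request with 512 bytes of
 * page data and a short tail, the tail is moved to its final resting
 * place past the page data, then the page bytes are copied in after
 * the RPC header, so the entire message sits contiguously in
 * rq_svec[0] and can go out as a single inline send with no chunks.
 */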

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 *
 * Returns zero on success, otherwise a negative errno.
 */

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t rpclen, padlen;
	ssize_t hdrlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = rdma_msg;

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline (but see later).
	 * o Large non-read ops return as a single reply chunk.
	 * o Large read ops return data as write chunk(s), header as inline.
	 *
	 * Note: the NFS code sending down multiple result segments implies
	 * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
	 */

	/*
	 * This code can handle read chunks, write chunks OR reply
	 * chunks -- only one type. If the request is too big to fit
	 * inline, then we will choose read chunks. If the request is
	 * a READ, then use write chunks to separate the file data
	 * into pages; otherwise use reply chunks.
	 */
	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
		wtype = rpcrdma_noch;
	else if (rqst->rq_rcv_buf.page_len == 0)
		wtype = rpcrdma_replych;
	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 *
	 * Note: the NFS code sending down multiple argument segments
	 * implies the op is a write.
	 * TBD check NFSv4 setacl
	 */
	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
		rtype = rpcrdma_noch;
	else if (rqst->rq_snd_buf.page_len == 0)
		rtype = rpcrdma_areadch;
	else
		rtype = rpcrdma_readch;

	/* The following simplification is not true forever */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
		dprintk("RPC: %s: cannot marshal multiple chunk lists\n",
			__func__);
		return -EIO;
	}

	hdrlen = RPCRDMA_HDRLEN_MIN;
	padlen = 0;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (rtype == rpcrdma_noch) {

		padlen = rpcrdma_inline_pullup(rqst,
						RPCRDMA_INLINE_PAD_VALUE(rqst));

		if (padlen) {
			headerp->rm_type = rdma_msgp;
			headerp->rm_body.rm_padded.rm_align =
				cpu_to_be32(RPCRDMA_INLINE_PAD_VALUE(rqst));
			headerp->rm_body.rm_padded.rm_thresh =
				cpu_to_be32(RPCRDMA_INLINE_PAD_THRESH);
			headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
			hdrlen += 2 * sizeof(u32);	/* extra words in padhdr */
			if (wtype != rpcrdma_noch) {
				dprintk("RPC: %s: invalid chunk list\n",
					__func__);
				return -EIO;
			}
		} else {
			headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
			/* new length after pullup */
			rpclen = rqst->rq_svec[0].iov_len;
			/*
			 * Currently we try to not actually use read inline.
			 * Reply chunks have the desirable property that
			 * they land, packed, directly in the target buffers
			 * without headers, so they require no fixup. The
			 * additional RDMA Write op sends the same amount
			 * of data, streams on-the-wire and adds no overhead
			 * on receive. Therefore, we request a reply chunk
			 * for non-writes wherever feasible and efficient.
			 */
			if (wtype == rpcrdma_noch)
				wtype = rpcrdma_replych;
		}
	}

	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
					       headerp, rtype);
		wtype = rtype;	/* simplify dprintk */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
					       headerp, wtype);
	}
	if (hdrlen < 0)
		return hdrlen;

	dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
		headerp, base, rdmab_lkey(req->rl_rdmabuf));

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

	req->rl_niovs = 2;

	if (padlen) {
		struct rpcrdma_ep *ep = &r_xprt->rx_ep;

		req->rl_send_iov[2].addr = rdmab_addr(ep->rep_padbuf);
		req->rl_send_iov[2].length = padlen;
		req->rl_send_iov[2].lkey = rdmab_lkey(ep->rep_padbuf);

		req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
		req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
		req->rl_send_iov[3].lkey = rdmab_lkey(req->rl_sendbuf);

		req->rl_niovs = 4;
	}

	return 0;
}

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;
	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

	i = be32_to_cpu(**iptrp);
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				be32_to_cpu(seg->rs_length),
				(unsigned long long)off,
				be32_to_cpu(seg->rs_handle));
		}
		total_len += be32_to_cpu(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *)cur_wchunk > base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}
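
/* Counting sketch (lengths illustrative): for a received write chunk
 * list encoded as
 *
 *	2 - HLOO(len 4096) - HLOO(len 1024) - 0
 *
 * this routine returns 5120 and leaves *iptrp pointing just past the
 * terminating zero word.
 */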

/*
 * Scatter inline received data back into provided iov's.
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;
		}
	}

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC: %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}
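
/* Fixup sketch (illustrative sizes): for a 120-byte inline reply and a
 * 100-byte receive head, the head iovec is simply re-pointed at the
 * receive buffer (no data copy), and the remaining 20 bytes are copied
 * into the first receive page, or into the tail if no page list was
 * supplied.
 */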

void
rpcrdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_ep *ep =
		container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
	struct rpcrdma_xprt *r_xprt =
		container_of(ep, struct rpcrdma_xprt, rx_ep);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * This function is called when an async event that changes the
 * connection state is posted to the connection. All it does at this
 * point is mark the connection up/down; the rpc timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	schedule_delayed_work(&ep->rep_connect_worker, 0);
}

/*
 * Called as a tasklet to do req/reply match and complete a request.
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpc_xprt *xprt = rep->rr_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	__be32 *iptr;
	int rdmalen, status;
	unsigned long cwnd;
	u32 credits;

	/* Check status. If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	if (rep->rr_len < RPCRDMA_HDRLEN_MIN) {
		dprintk("RPC: %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = rdmab_to_msg(rep->rr_rdmabuf);
	if (headerp->rm_vers != rpcrdma_version) {
		dprintk("RPC: %s: invalid version %d\n",
			__func__, be32_to_cpu(headerp->rm_vers));
		goto repost;
	}

	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC: %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, be32_to_cpu(headerp->rm_xid),
			rep->rr_len);
repost:
		r_xprt->rx_stats.bad_reply_count++;
		rep->rr_func = rpcrdma_reply_handler;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);

		return;
	}

	/* get request object */
	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC: %s: duplicate reply 0x%p to RPC "
			"request 0x%p: xid 0x%08x\n", __func__, rep, req,
			be32_to_cpu(headerp->rm_xid));
		goto repost;
	}

	dprintk("RPC: %s: reply 0x%p completes request 0x%p\n"
		" RPC request 0x%p xid 0x%08x\n",
		__func__, rep, req, rqst,
		be32_to_cpu(headerp->rm_xid));

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;
	xprt->reestablish_timeout = 0;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case rdma_msg:
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case rdma_nomsg:
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
				" chunks[012] == %d %d %d"
				" expected chunks <= %d\n",
				__func__, be32_to_cpu(headerp->rm_type),
				headerp->rm_body.rm_chunks[0],
				headerp->rm_body.rm_chunks[1],
				headerp->rm_body.rm_chunks[2],
				req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	credits = be32_to_cpu(headerp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_buf.rb_max_requests)
		credits = r_xprt->rx_buf.rb_max_requests;
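
	/* Convert the advertised credit limit into the generic RPC
	 * layer's congestion window, and release requests waiting on
	 * congestion if the window just grew.
	 */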
	cwnd = xprt->cwnd;
	xprt->cwnd = credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}