// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/bvec.h>
#include <trace/events/sunrpc.h>

static void _copy_to_pages(struct page **, size_t, const char *, size_t);


/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);

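/*
 * Number of pages spanned by the page data: covers page_base through
 * page_base + page_len, or zero if the buffer carries no page data.
 */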
size_t
xdr_buf_pagecount(struct xdr_buf *buf)
{
	if (!buf->page_len)
		return 0;
	return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

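/*
 * Lazily attach a bio_vec array mirroring buf->pages, one whole-page
 * entry per page. Does nothing if a bvec array is already present or
 * the buffer has no page data. Returns 0 or -ENOMEM.
 */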
int
xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
{
	size_t i, n = xdr_buf_pagecount(buf);

	if (n != 0 && buf->bvec == NULL) {
		buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
		if (!buf->bvec)
			return -ENOMEM;
		for (i = 0; i < n; i++) {
			buf->bvec[i].bv_page = buf->pages[i];
			buf->bvec[i].bv_len = PAGE_SIZE;
			buf->bvec[i].bv_offset = 0;
		}
	}
	return 0;
}

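/*
 * Release the bio_vec array attached by xdr_alloc_bvec(), if any.
 */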
void
xdr_free_bvec(struct xdr_buf *buf)
{
	kfree(buf->bvec);
	buf->bvec = NULL;
}

/**
 * xdr_inline_pages - Prepare receive buffer for a large reply
 * @xdr: xdr_buf into which reply will be placed
 * @offset: expected offset where data payload will start, in bytes
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 *
 */
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;
	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */

/**
 * _shift_data_left_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgto_base must be < pgfrom_base, but the memory areas
 *            they point to may overlap.
 */
static void
_shift_data_left_pages(struct page **pages, size_t pgto_base,
		       size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgfrom_base <= pgto_base);

	if (!len)
		return;

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		if (pgto_base >= PAGE_SIZE) {
			pgto_base = 0;
			pgto++;
		}
		if (pgfrom_base >= PAGE_SIZE) {
			pgfrom_base = 0;
			pgfrom++;
		}

		copy = len;
		if (copy > (PAGE_SIZE - pgto_base))
			copy = PAGE_SIZE - pgto_base;
		if (copy > (PAGE_SIZE - pgfrom_base))
			copy = PAGE_SIZE - pgfrom_base;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

		pgto_base += copy;
		pgfrom_base += copy;

	} while ((len -= copy) != 0);
}

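/*
 * Copy up to @len bytes from the start of the tail kvec into the pages
 * at page offset @pgto, then slide any remaining tail data down to the
 * front of the tail buffer.
 */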
static void
_shift_data_left_tail(struct xdr_buf *buf, unsigned int pgto, size_t len)
{
	struct kvec *tail = buf->tail;

	if (len > tail->iov_len)
		len = tail->iov_len;

	_copy_to_pages(buf->pages,
		       buf->page_base + pgto,
		       (char *)tail->iov_base,
		       len);
	tail->iov_len -= len;

	if (tail->iov_len > 0)
		memmove((char *)tail->iov_base,
			tail->iov_base + len,
			tail->iov_len);
}

/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *            they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
			size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	if (!len)
		return;

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}

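/*
 * Move up to @len bytes of page data, starting at page offset @pgfrom,
 * into the tail kvec, first shifting the tail's existing contents right
 * to make room. Returns the number of bytes actually relocated.
 */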
static unsigned int
_shift_data_right_tail(struct xdr_buf *buf, unsigned int pgfrom, size_t len)
{
	struct kvec *tail = buf->tail;
	unsigned int tailbuf_len;
	unsigned int result = 0;
	size_t copy;

	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		if (len > free_space)
			len = free_space;

		tail->iov_len += free_space;
		copy = len;

		if (tail->iov_len > len) {
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - free_space);
			result += tail->iov_len - free_space;
		} else
			copy = tail->iov_len;

		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				 buf->pages,
				 buf->page_base + pgfrom,
				 copy);
		result += copy;
	}

	return result;
}

/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	if (!len)
		return;

	pgto = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	for (;;) {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	if (!len)
		return;

	pgfrom = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);

/**
 * _zero_pages
 * @pages: array of pages
 * @pgbase: beginning page vector address
 * @len: length
 */
static void
_zero_pages(struct page **pages, size_t pgbase, size_t len)
{
	struct page **page;
	char *vpage;
	size_t zero;

	page = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		zero = PAGE_SIZE - pgbase;
		if (zero > len)
			zero = len;

		vpage = kmap_atomic(*page);
		memset(vpage + pgbase, 0, zero);
		kunmap_atomic(vpage);

		flush_dcache_page(*page);
		pgbase = 0;
		page++;

	} while ((len -= zero) != 0);
}

/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static unsigned int
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;
	unsigned int result;

	result = 0;
	tail = buf->tail;
	head = buf->head;

	WARN_ON_ONCE(len > head->iov_len);
	if (len > head->iov_len)
		len = head->iov_len;

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
				tail->iov_base, copy);
			result += copy;
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0) {
			_copy_from_pages((char *)tail->iov_base + offs,
					 buf->pages,
					 buf->page_base + pglen + offs - len,
					 copy);
			result += copy;
		}
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
			       (char *)head->iov_base +
			       head->iov_len - offs,
			       copy);
			result += copy;
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
						buf->page_base + len,
						buf->page_base,
						pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
			       (char *)head->iov_base + head->iov_len - len,
			       copy);
		result += copy;
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;

	return result;
}

/**
 * xdr_shrink_pagelen - shrinks buf->pages by up to @len bytes
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * The extra data is not lost, but is instead moved into buf->tail.
 * Returns the actual number of bytes moved.
 */
static unsigned int
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	unsigned int pglen = buf->page_len;
	unsigned int result;

	if (len > buf->page_len)
		len = buf->page_len;

	result = _shift_data_right_tail(buf, pglen - len, len);
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;

	return result;
}

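/**
 * xdr_shift_buf - shrink buf->head[0] in place
 * @buf: xdr_buf
 * @len: bytes to move out of the head kvec and into the pages/tail
 */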
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);

/**
 * xdr_page_pos - Return the current offset from the start of the xdr pages
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_page_pos(const struct xdr_stream *xdr)
{
	unsigned int pos = xdr_stream_pos(xdr);

	WARN_ON(pos < xdr->buf->head[0].iov_len);
	return pos - xdr->buf->head[0].iov_len;
}
EXPORT_SYMBOL_GPL(xdr_page_pos);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 *
 * Note: at the moment the RPC client only passes the length of our
 * scratch buffer in the xdr_buf's header kvec. Previously this
 * meant we needed to call xdr_adjust_iovec() after encoding the
 * data. With the new scheme, the xdr_stream manages the details
 * of the buffer length, and takes care of adjusting the kvec
 * length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	xdr_set_scratch_buffer(xdr, NULL, 0);
	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_encode);

/**
 * xdr_commit_encode - Ensure all data is written to buffer
 * @xdr: pointer to xdr_stream
 *
 * We handle encoding across page boundaries by giving the caller a
 * temporary location to write to, then later copying the data into
 * place; xdr_commit_encode does that copying.
 *
 * Normally the caller doesn't need to call this directly, as the
 * following xdr_reserve_space will do it. But an explicit call may be
 * required at the end of encoding, or any other time when the xdr_buf
 * data might be read.
 */
inline void xdr_commit_encode(struct xdr_stream *xdr)
{
	int shift = xdr->scratch.iov_len;
	void *page;

	if (shift == 0)
		return;
	page = page_address(*xdr->page_ptr);
	memcpy(xdr->scratch.iov_base, page, shift);
	memmove(page, page + shift, (void *)xdr->p - page);
	xdr->scratch.iov_len = 0;
}
EXPORT_SYMBOL_GPL(xdr_commit_encode);

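/*
 * Called when a reservation would run past xdr->end: begin encoding
 * into the next page, remembering the unused fragment at the end of
 * the current one in the scratch iovec so that xdr_commit_encode()
 * can later shift the data back and close the gap.
 */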
static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
		size_t nbytes)
{
	__be32 *p;
	int space_left;
	int frag1bytes, frag2bytes;

	if (nbytes > PAGE_SIZE)
		goto out_overflow;	/* Bigger buffers require special handling */
	if (xdr->buf->len + nbytes > xdr->buf->buflen)
		goto out_overflow;	/* Sorry, we're totally out of space */
	frag1bytes = (xdr->end - xdr->p) << 2;
	frag2bytes = nbytes - frag1bytes;
	if (xdr->iov)
		xdr->iov->iov_len += frag1bytes;
	else
		xdr->buf->page_len += frag1bytes;
	xdr->page_ptr++;
	xdr->iov = NULL;
	/*
	 * If the last encode didn't end exactly on a page boundary, the
	 * next one will straddle boundaries.  Encode into the next
	 * page, then copy it back later in xdr_commit_encode.  We use
	 * the "scratch" iov to track any temporarily unused fragment of
	 * space at the end of the previous buffer:
	 */
	xdr->scratch.iov_base = xdr->p;
	xdr->scratch.iov_len = frag1bytes;
	p = page_address(*xdr->page_ptr);
	/*
	 * Note this is where the next encode will start after we've
	 * shifted this one back:
	 */
	xdr->p = (void *)p + frag2bytes;
	space_left = xdr->buf->buflen - xdr->buf->len;
	xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
	xdr->buf->page_len += frag2bytes;
	xdr->buf->len += nbytes;
	return p;
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	xdr_commit_encode(xdr);
	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return xdr_get_next_encode_buffer(xdr, nbytes);
	xdr->p = q;
	if (xdr->iov)
		xdr->iov->iov_len += nbytes;
	else
		xdr->buf->page_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
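
/*
 * Illustrative sketch (not part of this file's API): a typical encode
 * sequence pairs xdr_init_encode() with xdr_reserve_space().  The
 * "req" request pointer and the two words encoded here are assumptions
 * invented for the example.
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf, NULL, req);
 *	p = xdr_reserve_space(&xdr, 2 * sizeof(__be32));
 *	if (!p)
 *		return -EMSGSIZE;
 *	*p++ = cpu_to_be32(opcode);
 *	*p = cpu_to_be32(arglen);
 */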

/**
 * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending
 * @xdr: pointer to xdr_stream
 * @vec: pointer to a kvec array
 * @nbytes: number of bytes to reserve
 *
 * Reserves enough buffer space to encode 'nbytes' of data and stores the
 * pointers in 'vec'. The size argument passed to xdr_reserve_space() is
 * determined based on the number of bytes remaining in the current page to
 * avoid invalidating iov_base pointers when xdr_commit_encode() is called.
 */
int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, size_t nbytes)
{
	int thislen;
	int v = 0;
	__be32 *p;

	/*
	 * svcrdma requires every READ payload to start somewhere
	 * in xdr->pages.
	 */
	if (xdr->iov == xdr->buf->head) {
		xdr->iov = NULL;
		xdr->end = xdr->p;
	}

	while (nbytes) {
		thislen = xdr->buf->page_len % PAGE_SIZE;
		thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen);

		p = xdr_reserve_space(xdr, thislen);
		if (!p)
			return -EIO;

		vec[v].iov_base = p;
		vec[v].iov_len = thislen;
		v++;
		nbytes -= thislen;
	}

	return v;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space_vec);

/**
 * xdr_truncate_encode - truncate an encode buffer
 * @xdr: pointer to xdr_stream
 * @len: new length of buffer
 *
 * Truncates the xdr stream, so that xdr->buf->len == len,
 * and xdr->p points at offset len from the start of the buffer, and
 * head, tail, and page lengths are adjusted to correspond.
 *
 * If this means moving xdr->p to a different buffer, we assume that
 * the end pointer should be set to the end of the current page,
 * except in the case of the head buffer when we assume the head
 * buffer's current length represents the end of the available buffer.
 *
 * This is *not* safe to use on a buffer that already has inlined page
 * cache pages (as in a zero-copy server read reply), except for the
 * simple case of truncating from one position in the tail to another.
 *
 */
void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	int fraglen;
	int new;

	if (len > buf->len) {
		WARN_ON_ONCE(1);
		return;
	}
	xdr_commit_encode(xdr);

	fraglen = min_t(int, buf->len - len, tail->iov_len);
	tail->iov_len -= fraglen;
	buf->len -= fraglen;
	if (tail->iov_len) {
		xdr->p = tail->iov_base + tail->iov_len;
		WARN_ON_ONCE(!xdr->end);
		WARN_ON_ONCE(!xdr->iov);
		return;
	}
	WARN_ON_ONCE(fraglen);
	fraglen = min_t(int, buf->len - len, buf->page_len);
	buf->page_len -= fraglen;
	buf->len -= fraglen;

	new = buf->page_base + buf->page_len;

	xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);

	if (buf->page_len) {
		xdr->p = page_address(*xdr->page_ptr);
		xdr->end = (void *)xdr->p + PAGE_SIZE;
		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
		WARN_ON_ONCE(xdr->iov);
		return;
	}
	if (fraglen)
		xdr->end = head->iov_base + head->iov_len;
	/* (otherwise assume xdr->end is already set) */
	xdr->page_ptr--;
	head->iov_len = len;
	buf->len = len;
	xdr->p = head->iov_base + head->iov_len;
	xdr->iov = buf->head;
}
EXPORT_SYMBOL(xdr_truncate_encode);

/**
 * xdr_restrict_buflen - decrease available buffer space
 * @xdr: pointer to xdr_stream
 * @newbuflen: new maximum number of bytes available
 *
 * Adjust our idea of how much space is available in the buffer.
 * If we've already used too much space in the buffer, returns -1.
 * If the available space is already smaller than newbuflen, returns 0
 * and does nothing.  Otherwise, adjusts xdr->buf->buflen to newbuflen
 * and ensures xdr->end is set at most offset newbuflen from the start
 * of the buffer.
 */
int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
{
	struct xdr_buf *buf = xdr->buf;
	int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
	int end_offset = buf->len + left_in_this_buf;

	if (newbuflen < 0 || newbuflen < buf->len)
		return -1;
	if (newbuflen > buf->buflen)
		return 0;
	if (newbuflen < end_offset)
		xdr->end = (void *)xdr->end + newbuflen - end_offset;
	buf->buflen = newbuflen;
	return 0;
}
EXPORT_SYMBOL(xdr_restrict_buflen);

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);

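/*
 * Decode-side positioning helpers.  Each one points xdr->p and
 * xdr->end at a window inside one kvec or page fragment and reports
 * how much of the requested window it could cover, letting callers
 * fall through from head to pages to tail.
 */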
static unsigned int xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
				unsigned int base, unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	if (unlikely(base > len))
		base = len;
	xdr->p = (__be32*)(iov->iov_base + base);
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
	return len - base;
}

static unsigned int xdr_set_page_base(struct xdr_stream *xdr,
				      unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen) {
		base = maxlen;
		maxlen = 0;
	} else
		maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return len;
}

static void xdr_set_page(struct xdr_stream *xdr, unsigned int base,
			 unsigned int len)
{
	if (xdr_set_page_base(xdr, base, len) == 0) {
		base -= xdr->buf->page_len;
		xdr_set_iov(xdr, xdr->buf->tail, base, len);
	}
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;
	if (newbase < xdr->buf->page_len)
		xdr_set_page_base(xdr, newbase, xdr_stream_remaining(xdr));
	else
		xdr_set_iov(xdr, xdr->buf->tail, 0, xdr_stream_remaining(xdr));
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head)
		xdr_set_page(xdr, 0, xdr_stream_remaining(xdr));
	return xdr->p != xdr->end;
}

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (xdr_set_iov(xdr, buf->head, 0, buf->len) == 0 &&
	    xdr_set_page_base(xdr, 0, buf->len) == 0)
		xdr_set_iov(xdr, buf->tail, 0, buf->len);
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);

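/*
 * Advance the decode position by @nbytes (rounded up to a whole number
 * of XDR words) if the current fragment and the remaining message both
 * hold that much; returns the old position, or NULL on overrun.
 */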
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}

/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
	xdr->scratch.iov_base = buf;
	xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);

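/*
 * The requested object straddles the end of the current fragment:
 * copy what is present, step to the next fragment, copy the rest,
 * and return a pointer to the linearized copy in the scratch buffer.
 */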
static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	char *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		goto out_overflow;
	p = __xdr_inline_decode(xdr, cplen);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, cplen);
	if (!xdr_set_next_buffer(xdr))
		goto out_overflow;
	cpdest += cplen;
	nbytes -= cplen;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}

/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (unlikely(nbytes == 0))
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		goto out_overflow;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
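
/*
 * Illustrative sketch (not part of this file's API): decoding a
 * length-prefixed opaque from a reply.  The "req" request pointer and
 * the caller-provided "scratch" page are assumptions invented for the
 * example.
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *	u32 len;
 *
 *	xdr_init_decode(&xdr, &req->rq_rcv_buf, NULL, req);
 *	xdr_set_scratch_buffer(&xdr, scratch, PAGE_SIZE);
 *	p = xdr_inline_decode(&xdr, sizeof(__be32));
 *	if (!p)
 *		return -EIO;
 *	len = be32_to_cpup(p);
 *	p = xdr_inline_decode(&xdr, len);
 *	if (!p)
 *		return -EIO;
 */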

static void xdr_realign_pages(struct xdr_stream *xdr)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->head;
	unsigned int cur = xdr_stream_pos(xdr);
	unsigned int copied, offset;

	/* Realign pages to current pointer position */
	if (iov->iov_len > cur) {
		offset = iov->iov_len - cur;
		copied = xdr_shrink_bufhead(buf, offset);
		trace_rpc_xdr_alignment(xdr, offset, copied);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
}

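/*
 * Align the page data to the current decode position and clamp @len
 * to the XDR words actually remaining; page data beyond the clamped
 * length is truncated into the tail.  Returns the number of page
 * bytes available at the new position.
 */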
static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int cur = xdr_stream_pos(xdr);
	unsigned int copied, offset;

	if (xdr->nwords == 0)
		return 0;

	xdr_realign_pages(xdr);
	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	if (buf->page_len <= len)
		len = buf->page_len;
	else if (nwords < xdr->nwords) {
		/* Truncate page data and move it into the tail */
		offset = buf->page_len - len;
		copied = xdr_shrink_pagelen(buf, offset);
		trace_rpc_xdr_alignment(xdr, offset, copied);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
	return len;
}

/**
 * xdr_read_pages - align page-based XDR data to current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + @len
 * bytes is moved into the XDR tail[]. The xdr_stream current position is
 * then advanced past that data to align to the next XDR object in the tail.
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int base, end, pglen;

	pglen = xdr_align_pages(xdr, nwords << 2);
	if (pglen == 0)
		return 0;

	xdr->nwords -= nwords;
	base = (nwords << 2) - pglen;
	end = xdr_stream_remaining(xdr) - pglen;

	if (xdr_set_iov(xdr, xdr->buf->tail, base, end) == 0)
		xdr->nwords = 0;
	return len <= pglen ? len : pglen;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);

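/**
 * xdr_align_data - move a decoded data segment into place in buf->pages
 * @xdr: pointer to xdr_stream struct
 * @offset: page offset at which the segment should start
 * @length: size of the segment, in bytes
 *
 * Shifts page (and, if needed, tail) data leftwards so that the
 * segment at the current decode position begins at @offset, then
 * advances the stream past it.  Returns the (possibly clamped)
 * segment length.
 */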
uint64_t xdr_align_data(struct xdr_stream *xdr, uint64_t offset, uint32_t length)
{
	struct xdr_buf *buf = xdr->buf;
	unsigned int from, bytes;
	unsigned int shift = 0;

	if ((offset + length) < offset ||
	    (offset + length) > buf->page_len)
		length = buf->page_len - offset;

	xdr_realign_pages(xdr);
	from = xdr_page_pos(xdr);
	bytes = xdr_stream_remaining(xdr);
	if (length < bytes)
		bytes = length;

	/* Move page data to the left */
	if (from > offset) {
		shift = min_t(unsigned int, bytes, buf->page_len - from);
		_shift_data_left_pages(buf->pages,
				       buf->page_base + offset,
				       buf->page_base + from,
				       shift);
		bytes -= shift;

		/* Move tail data into the pages, if necessary */
		if (bytes > 0)
			_shift_data_left_tail(buf, offset + shift, bytes);
	}

	xdr->nwords -= XDR_QUADLEN(length);
	xdr_set_page(xdr, from + length, xdr_stream_remaining(xdr));
	return length;
}
EXPORT_SYMBOL_GPL(xdr_align_data);

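/**
 * xdr_expand_hole - open and zero-fill a hole in buf->pages
 * @xdr: pointer to xdr_stream struct
 * @offset: page offset at which the hole starts
 * @length: size of the hole, in bytes
 *
 * Shifts existing page data rightwards (spilling into the tail where
 * there is room) to open a @length-byte gap at @offset, zeroes the
 * gap, and advances the stream past it.  Returns the (possibly
 * clamped) hole length.
 */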
uint64_t xdr_expand_hole(struct xdr_stream *xdr, uint64_t offset, uint64_t length)
{
	struct xdr_buf *buf = xdr->buf;
	unsigned int bytes;
	unsigned int from;
	unsigned int truncated = 0;

	if ((offset + length) < offset ||
	    (offset + length) > buf->page_len)
		length = buf->page_len - offset;

	xdr_realign_pages(xdr);
	from = xdr_page_pos(xdr);
	bytes = xdr_stream_remaining(xdr);

	if (offset + length + bytes > buf->page_len) {
		unsigned int shift = (offset + length + bytes) - buf->page_len;
		unsigned int res = _shift_data_right_tail(buf, from + bytes - shift, shift);
		truncated = shift - res;
		xdr->nwords -= XDR_QUADLEN(truncated);
		bytes -= shift;
	}

	/* Now move the page data over and zero pages */
	if (bytes > 0)
		_shift_data_right_pages(buf->pages,
					buf->page_base + offset + length,
					buf->page_base + from,
					bytes);
	_zero_pages(buf->pages, buf->page_base + offset, length);

	buf->len += length - (from - offset) - truncated;
	xdr_set_page(xdr, offset + length, xdr_stream_remaining(xdr));
	return length;
}
EXPORT_SYMBOL_GPL(xdr_expand_hole);

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_align_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len != 0)
		xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

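/**
 * xdr_buf_from_iov - wrap a single kvec in an xdr_buf
 * @iov: kvec holding the entire message
 * @buf: xdr_buf to initialize
 *
 * Sets up @buf so that head[0] covers @iov while the page and tail
 * sections are empty.
 */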
void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/**
 * xdr_buf_subsegment - set subbuf to a portion of buf
 * @buf: an xdr buffer
 * @subbuf: the result buffer
 * @base: beginning of range in bytes
 * @len: length of range in bytes
 *
 * sets @subbuf to an xdr buffer representing the portion of @buf of
 * length @len starting at offset @base.
 *
 * @buf and @subbuf may be pointers to the same struct xdr_buf.
 *
 * Returns -1 if base or length are out of bounds.
1384 */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
		   unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		base -= buf->head[0].iov_len;
		subbuf->head[0].iov_base = buf->head[0].iov_base;
		subbuf->head[0].iov_len = 0;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->pages = buf->pages;
		subbuf->page_base = 0;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		base -= buf->tail[0].iov_len;
		subbuf->tail[0].iov_base = buf->tail[0].iov_base;
		subbuf->tail[0].iov_len = 0;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
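
/*
 * Usage sketch (editorial illustration): carving out a byte range, for
 * example to checksum part of an RPC payload in place. "offset" and "len"
 * are hypothetical caller-supplied values:
 *
 *	struct xdr_buf subbuf;
 *
 *	if (xdr_buf_subsegment(buf, &subbuf, offset, len) < 0)
 *		return -EINVAL;
 *	// subbuf now aliases buf's head/pages/tail for just that range.
 */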

/**
 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
 * @buf: buf to be trimmed
 * @len: number of bytes to reduce "buf" by
 *
 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
 * that it's possible that we'll trim less than that amount if the xdr_buf is
 * too small, or if (for instance) it's all in the head and the parser has
 * already read too far into it.
 */
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
	size_t cur;
	unsigned int trim = len;

	if (buf->tail[0].iov_len) {
		cur = min_t(size_t, buf->tail[0].iov_len, trim);
		buf->tail[0].iov_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->page_len) {
		cur = min_t(unsigned int, buf->page_len, trim);
		buf->page_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->head[0].iov_len) {
		cur = min_t(size_t, buf->head[0].iov_len, trim);
		buf->head[0].iov_len -= cur;
		trim -= cur;
	}
fix_len:
	buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);
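
/*
 * Usage sketch (editorial illustration): stripping a trailing checksum/MIC
 * after it has been verified. "checksum_len" is a hypothetical value the
 * caller derived from its security mechanism:
 *
 *	xdr_buf_trim(buf, checksum_len);
 *	// buf->len (and tail, then page, then head lengths) shrink from
 *	// the end.
 */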

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
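
/*
 * Usage sketch (editorial illustration): copying a fixed-size verifier out
 * of, and back into, an xdr_buf at a known offset. "base" is a hypothetical
 * caller-computed offset; both helpers return 0 on success:
 *
 *	__be32 verf[2];
 *
 *	if (read_bytes_from_xdr_buf(buf, base, verf, sizeof(verf)) != 0)
 *		return -EIO;
 *	// ... inspect or rewrite the verifier ...
 *	if (write_bytes_to_xdr_buf(buf, base, verf, sizeof(verf)) != 0)
 *		return -EIO;
 */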

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32 raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
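
/*
 * Usage sketch (editorial illustration): these helpers do the network
 * byte-order conversion internally, so callers work in host order:
 *
 *	u32 count;
 *
 *	if (xdr_decode_word(buf, 0, &count) != 0)
 *		return -EINVAL;
 *	if (xdr_encode_word(buf, 0, count + 1) != 0)
 *		return -EINVAL;
 */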

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
		    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_SHIFT);
		base &= ~PAGE_MASK;
		avail_page = min_t(unsigned int, PAGE_SIZE - base,
				   avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
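
/*
 * Usage sketch (editorial illustration): decoding a counted array of
 * fixed-size elements with a per-element callback. The callback body and
 * the 4-byte element size are hypothetical:
 *
 *	static int demo_xcode(struct xdr_array2_desc *desc, void *elem)
 *	{
 *		u32 id = be32_to_cpup(elem);
 *		// consume one element, e.g. record "id" somewhere
 *		return 0;
 *	}
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size	= 4,
 *		.array_maxlen	= 64,
 *		.xcode		= demo_xcode,
 *	};
 *	int err = xdr_decode_array2(buf, base, &desc);
 *	// on success, desc.array_len holds the decoded element count.
 */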

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_SHIFT;
		thislen = PAGE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
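
/*
 * Usage sketch (editorial illustration): the actor is invoked once per
 * contiguous segment of the buffer. This hypothetical actor just totals
 * the bytes visited; a checksumming caller would hash each scatterlist
 * entry instead:
 *
 *	static int count_actor(struct scatterlist *sg, void *data)
 *	{
 *		size_t *total = data;
 *
 *		*total += sg->length;
 *		return 0;	// non-zero aborts the walk
 *	}
 *
 *	size_t total = 0;
 *	int err = xdr_process_buf(buf, 0, buf->len, count_actor, &total);
 */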

/**
 * xdr_stream_decode_opaque - Decode variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store opaque data
 * @size: size of storage buffer @ptr
 *
 * Return values:
 * On success, returns size of object stored in *@ptr
 * %-EBADMSG on XDR buffer overflow
 * %-EMSGSIZE on overflow of storage buffer @ptr
 */
ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret <= 0)
		return ret;
	memcpy(ptr, p, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);
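
/*
 * Usage sketch (editorial illustration): pulling a fixed-maximum opaque
 * (say, a 16-byte session id) into a local buffer:
 *
 *	u8 sessionid[16];
 *	ssize_t n;
 *
 *	n = xdr_stream_decode_opaque(xdr, sessionid, sizeof(sessionid));
 *	if (n < 0)
 *		return n;	// -EBADMSG or -EMSGSIZE
 *	// n is the number of opaque bytes actually received.
 */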

/**
 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store pointer to opaque data
 * @maxlen: maximum acceptable object size
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 * On success, returns size of object stored in *@ptr
 * %-EBADMSG on XDR buffer overflow
 * %-EMSGSIZE if the size of the object would exceed @maxlen
 * %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
		size_t maxlen, gfp_t gfp_flags)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		*ptr = kmemdup(p, ret, gfp_flags);
		if (*ptr != NULL)
			return ret;
		ret = -ENOMEM;
	}
	*ptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);

/**
 * xdr_stream_decode_string - Decode variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store string
 * @size: size of storage buffer @str
 *
 * Return values:
 * On success, returns length of NUL-terminated string stored in *@str
 * %-EBADMSG on XDR buffer overflow
 * %-EMSGSIZE on overflow of storage buffer @str
 */
ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret > 0) {
		memcpy(str, p, ret);
		str[ret] = '\0';
		return strlen(str);
	}
	*str = '\0';
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string);
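
/*
 * Usage sketch (editorial illustration): decoding a short name into a
 * stack buffer. Passing sizeof(name) - 1 conservatively leaves room for
 * the terminating NUL this helper appends:
 *
 *	char name[64];
 *	ssize_t len;
 *
 *	len = xdr_stream_decode_string(xdr, name, sizeof(name) - 1);
 *	if (len < 0)
 *		return len;
 */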

/**
 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store pointer to string
 * @maxlen: maximum acceptable string length
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 * On success, returns length of NUL-terminated string stored in *@str
 * %-EBADMSG on XDR buffer overflow
 * %-EMSGSIZE if the size of the string would exceed @maxlen
 * %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
		size_t maxlen, gfp_t gfp_flags)
{
	void *p;
	ssize_t ret;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		char *s = kmemdup_nul(p, ret, gfp_flags);

		if (s != NULL) {
			*str = s;
			return strlen(s);
		}
		ret = -ENOMEM;
	}
	*str = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);
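
/*
 * Usage sketch (editorial illustration): duplicating a variable-length
 * string whose upper bound the protocol defines; the caller owns the
 * allocation and must kfree() it. The 1024-byte cap is hypothetical:
 *
 *	char *owner = NULL;
 *	ssize_t len;
 *
 *	len = xdr_stream_decode_string_dup(xdr, &owner, 1024, GFP_KERNEL);
 *	if (len < 0)
 *		return len;
 *	// ... use owner ...
 *	kfree(owner);
 */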