#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

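/*
 * The iterate_{iovec,kvec,bvec} macros below walk the segments of an
 * iov_iter and evaluate STEP once per contiguous chunk, with __v
 * describing the current chunk.  For the userspace iovec case STEP
 * evaluates to the number of bytes it failed to process (as
 * __copy_*_user does); the kvec/bvec walkers ignore that value, hence
 * the (void) casts.  iterate_all_kinds() dispatches on i->type, and
 * iterate_and_advance() additionally consumes what was processed.
 *
 * Rough sketch of a caller; the step_for_*() expressions are
 * placeholders, the real users appear further down in this file.  The
 * three expressions are, in order, the iovec (I), bvec (B) and kvec (K)
 * cases:
 *
 *	iterate_and_advance(i, bytes, v,
 *		step_for_user_iovec(v.iov_base, v.iov_len),
 *		step_for_bvec_page(v.bv_page, v.bv_offset, v.bv_len),
 *		step_for_kernel_kvec(v.iov_base, v.iov_len)
 *	)
 */
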
#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
        size_t left; \
        size_t wanted = n; \
        __p = i->iov; \
        __v.iov_len = min(n, __p->iov_len - skip); \
        if (likely(__v.iov_len)) { \
                __v.iov_base = __p->iov_base + skip; \
                left = (STEP); \
                __v.iov_len -= left; \
                skip += __v.iov_len; \
                n -= __v.iov_len; \
        } else { \
                left = 0; \
        } \
        while (unlikely(!left && n)) { \
                __p++; \
                __v.iov_len = min(n, __p->iov_len); \
                if (unlikely(!__v.iov_len)) \
                        continue; \
                __v.iov_base = __p->iov_base; \
                left = (STEP); \
                __v.iov_len -= left; \
                skip = __v.iov_len; \
                n -= __v.iov_len; \
        } \
        n = wanted - n; \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
        size_t wanted = n; \
        __p = i->kvec; \
        __v.iov_len = min(n, __p->iov_len - skip); \
        if (likely(__v.iov_len)) { \
                __v.iov_base = __p->iov_base + skip; \
                (void)(STEP); \
                skip += __v.iov_len; \
                n -= __v.iov_len; \
        } \
        while (unlikely(n)) { \
                __p++; \
                __v.iov_len = min(n, __p->iov_len); \
                if (unlikely(!__v.iov_len)) \
                        continue; \
                __v.iov_base = __p->iov_base; \
                (void)(STEP); \
                skip = __v.iov_len; \
                n -= __v.iov_len; \
        } \
        n = wanted; \
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) { \
        size_t wanted = n; \
        __p = i->bvec; \
        __v.bv_len = min_t(size_t, n, __p->bv_len - skip); \
        if (likely(__v.bv_len)) { \
                __v.bv_page = __p->bv_page; \
                __v.bv_offset = __p->bv_offset + skip; \
                (void)(STEP); \
                skip += __v.bv_len; \
                n -= __v.bv_len; \
        } \
        while (unlikely(n)) { \
                __p++; \
                __v.bv_len = min_t(size_t, n, __p->bv_len); \
                if (unlikely(!__v.bv_len)) \
                        continue; \
                __v.bv_page = __p->bv_page; \
                __v.bv_offset = __p->bv_offset; \
                (void)(STEP); \
                skip = __v.bv_len; \
                n -= __v.bv_len; \
        } \
        n = wanted; \
}

#define iterate_all_kinds(i, n, v, I, B, K) { \
        size_t skip = i->iov_offset; \
        if (unlikely(i->type & ITER_BVEC)) { \
                const struct bio_vec *bvec; \
                struct bio_vec v; \
                iterate_bvec(i, n, v, bvec, skip, (B)) \
        } else if (unlikely(i->type & ITER_KVEC)) { \
                const struct kvec *kvec; \
                struct kvec v; \
                iterate_kvec(i, n, v, kvec, skip, (K)) \
        } else { \
                const struct iovec *iov; \
                struct iovec v; \
                iterate_iovec(i, n, v, iov, skip, (I)) \
        } \
}

#define iterate_and_advance(i, n, v, I, B, K) { \
        if (unlikely(i->count < n)) \
                n = i->count; \
        if (i->count) { \
                size_t skip = i->iov_offset; \
                if (unlikely(i->type & ITER_BVEC)) { \
                        const struct bio_vec *bvec; \
                        struct bio_vec v; \
                        iterate_bvec(i, n, v, bvec, skip, (B)) \
                        if (skip == bvec->bv_len) { \
                                bvec++; \
                                skip = 0; \
                        } \
                        i->nr_segs -= bvec - i->bvec; \
                        i->bvec = bvec; \
                } else if (unlikely(i->type & ITER_KVEC)) { \
                        const struct kvec *kvec; \
                        struct kvec v; \
                        iterate_kvec(i, n, v, kvec, skip, (K)) \
                        if (skip == kvec->iov_len) { \
                                kvec++; \
                                skip = 0; \
                        } \
                        i->nr_segs -= kvec - i->kvec; \
                        i->kvec = kvec; \
                } else { \
                        const struct iovec *iov; \
                        struct iovec v; \
                        iterate_iovec(i, n, v, iov, skip, (I)) \
                        if (skip == iov->iov_len) { \
                                iov++; \
                                skip = 0; \
                        } \
                        i->nr_segs -= iov - i->iov; \
                        i->iov = iov; \
                } \
                i->count -= n; \
                i->iov_offset = skip; \
        } \
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        to = kaddr + offset;
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                char __user *buf = i->iov->iov_base + i->iov_offset;
                bytes = min(bytes, i->iov->iov_len - i->iov_offset);
                return fault_in_pages_readable(buf, bytes);
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
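
/*
 * Rough sketch of the usual pattern around the fault-in helpers,
 * loosely modelled on a generic buffered-write path (the surrounding
 * loop and variables are illustrative, not taken from this file):
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 *	...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	...
 *	iov_iter_advance(i, copied);
 */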

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
{
        size_t skip = i->iov_offset;
        const struct iovec *iov;
        int err;
        struct iovec v;

        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                iterate_iovec(i, bytes, v, iov, skip, ({
                        err = fault_in_multipages_readable(v.iov_base,
                                        v.iov_len);
                        if (unlikely(err))
                                return err;
                        0;}))
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);

void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better. Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS)) {
                direction |= ITER_KVEC;
                i->type = direction;
                i->kvec = (struct kvec *)iov;
        } else {
                i->type = direction;
                i->iov = iov;
        }
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
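
/*
 * Minimal sketch of setting up an iterator over a userspace iovec array;
 * iov, nr_segs and total_len are whatever the caller already has, and
 * READ means the buffers are the destination of a read(2)-style
 * operation:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, iov, nr_segs, total_len);
 *	while (iov_iter_count(&iter))
 *		...fill it with copy_to_iter()/copy_page_to_iter()...
 */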

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        iterate_and_advance(i, bytes, v,
                __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
                               v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_to_iter);
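
/*
 * Sketch of the common calling pattern for copy_to_iter()/copy_from_iter();
 * struct foo_hdr is a made-up example structure.  The return value is the
 * number of bytes actually transferred, which may be short if the iterator
 * runs out of space or a userspace access faults:
 *
 *	struct foo_hdr hdr = { ... };
 *
 *	if (copy_to_iter(&hdr, sizeof(hdr), iter) != sizeof(hdr))
 *		return -EFAULT;
 */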

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        iterate_and_advance(i, bytes, v,
                __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
                                 v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        iterate_and_advance(i, bytes, v,
                __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
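
/*
 * Rough sketch of how a buffered-read path might drain a page-cache page
 * into the iterator (loosely modelled on a generic file read loop; the
 * surrounding variables are illustrative):
 *
 *	size_t copied = copy_page_to_iter(page, offset, nr, iter);
 *	if (copied < nr && iov_iter_count(iter))
 *		return -EFAULT;	// short copy: userspace buffer faulted
 */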

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                           struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        iterate_and_advance(i, bytes, v,
                __clear_user(v.iov_base, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                memset(v.iov_base, 0, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        iterate_all_kinds(i, bytes, v,
                __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
                                          v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_KVEC));
        i->type = direction;
        i->kvec = kvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_BVEC));
        i->type = direction;
        i->bvec = bvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
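
/*
 * Minimal sketch of a kernel-internal user: wrapping a kernel buffer in a
 * single kvec so data can be read into it (buf and len are the caller's;
 * note the direction must include ITER_KVEC, as the BUG_ON above insists):
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter from;
 *
 *	iov_iter_kvec(&from, READ | ITER_KVEC, &kv, 1, len);
 */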

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len,
                res |= (unsigned long)v.iov_base | v.iov_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
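
/*
 * Illustrative use, roughly how a direct-I/O style path pins the user
 * pages backing the next chunk of the iterator; MAX_CHUNK_PAGES is a
 * made-up array size and error handling is trimmed:
 *
 *	struct page *pages[MAX_CHUNK_PAGES];
 *	size_t off;
 *	ssize_t got;
 *
 *	got = iov_iter_get_pages(iter, pages, LONG_MAX,
 *				 ARRAY_SIZE(pages), &off);
 *	if (got > 0)
 *		iov_iter_advance(iter, got);
 *	// the returned pages hold references that must be put_page()d later
 */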

static struct page **get_pages_array(size_t n)
{
        struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        struct page **p;

        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                p = get_pages_array(n);
                if (!p)
                        return -ENOMEM;
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
                if (unlikely(res < 0)) {
                        kvfree(p);
                        return res;
                }
                *pages = p;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                *pages = p = get_pages_array(1);
                if (!p)
                        return -ENOMEM;
                get_page(*p = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
                               struct iov_iter *i)
{
        char *to = addr;
        __wsum sum, next;
        size_t off = 0;
        sum = *csum;
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
                                               v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck(p + v.bv_offset,
                                                 (to += v.bv_len) - v.bv_len,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck(v.iov_base,
                                                 (to += v.iov_len) - v.iov_len,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
                             struct iov_iter *i)
{
        const char *from = addr;
        __wsum sum, next;
        size_t off = 0;
        sum = *csum;
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
                                             v.iov_base,
                                             v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
                                                 p + v.bv_offset,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
                                                 v.iov_base,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        size_t size = i->count;
        int npages = 0;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v, ({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        0;}),({
                npages++;
                if (npages >= maxpages)
                        return maxpages;
        }),({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        })
        )
        return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
        *new = *old;
        if (new->type & ITER_BVEC)
                return new->bvec = kmemdup(new->bvec,
                                    new->nr_segs * sizeof(struct bio_vec),
                                    flags);
        else
                /* iovec and kvec have identical layout */
                return new->iov = kmemdup(new->iov,
                                   new->nr_segs * sizeof(struct iovec),
                                   flags);
}
EXPORT_SYMBOL(dup_iter);

int import_iovec(int type, const struct iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
EXPORT_SYMBOL(import_iovec);
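
/*
 * Typical call-site shape, a sketch of how a readv()-style syscall might
 * use this; the on-stack iovstack is the usual fast path and the other
 * variable names are illustrative:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	...use the iter...
 *	kfree(iov);	// NULL if the fast path was used; kfree(NULL) is fine
 */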

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
                 struct iovec *iov, struct iov_iter *i)
{
        if (len > MAX_RW_COUNT)
                len = MAX_RW_COUNT;
        if (unlikely(!access_ok(!rw, buf, len)))
                return -EFAULT;

        iov->iov_base = buf;
        iov->iov_len = len;
        iov_iter_init(i, rw, iov, 1, len);
        return 0;
}
EXPORT_SYMBOL(import_single_range);