/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Berkeley style UIO structures - Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <crypto/hash.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC = 4,
	ITER_KVEC = 8,
	ITER_BVEC = 16,
	ITER_PIPE = 32,
	ITER_DISCARD = 64,
};

struct iov_iter {
	/*
	 * Bit 0 is the read/write bit, set if we're writing.
	 * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
	 * the caller isn't expecting to drop a page reference when done.
	 */
	unsigned int type;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			int idx;
			int start_idx;
		};
	};
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->type & ~(READ | WRITE);
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->type & (READ | WRITE);
}

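/*
 * Illustrative sketch (not part of the kernel API): because the direction
 * bits live in the low bits of i->type, iov_iter_rw() combines freely with
 * the type predicates above. example_iter_is_user_source() is a
 * hypothetical helper name.
 */
#if 0	/* example only, never compiled */
static inline bool example_iter_is_user_source(const struct iov_iter *i)
{
	/* WRITE here means the iterator is the source of the data */
	return iter_is_iovec(i) && iov_iter_rw(i) == WRITE;
}
#endif
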
/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths
 * can overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}

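/*
 * A minimal sketch of the validation the NOTE above asks for: reject a
 * vector whose segment lengths would overflow size_t when summed. In the
 * kernel, the real checking for user-supplied vectors is done on import
 * (see import_iovec() below); example_iov_length_checked() is purely
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_iov_length_checked(const struct iovec *iov,
				      unsigned long nr_segs, size_t *total)
{
	size_t sum = 0;
	unsigned long seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (iov[seg].iov_len > SIZE_MAX - sum)
			return -EINVAL;	/* sum would overflow size_t */
		sum += iov[seg].iov_len;
	}
	*total = sum;
	return 0;
}
#endif
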
static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}

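/*
 * Sketch of how iov_iter_iovec() pairs with iov_iter_advance() to walk an
 * ITER_IOVEC one segment at a time. Hypothetical example, assuming the
 * iterator is iovec-backed and the caller handles the user pointers in
 * v.iov_base itself.
 */
#if 0	/* example only, never compiled */
static void example_walk_segments(struct iov_iter *i)
{
	while (i->count) {
		struct iovec v = iov_iter_iovec(i);

		/* ... process v.iov_base / v.iov_len ... */
		iov_iter_advance(i, v.iov_len);
	}
}
#endif
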
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full_nocache(addr, bytes, i);
}

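/*
 * Usage sketch for the checked copy helpers: build a kernel-space iterator
 * over a caller-supplied kvec array and drain it into a flat buffer.
 * Hypothetical example; iov_iter_kvec() is declared further down in this
 * header, and WRITE marks the iterator as the data source.
 */
#if 0	/* example only, never compiled */
static int example_gather_kvec(void *dst, size_t len,
			       const struct kvec *kv, unsigned long nr_segs)
{
	struct iov_iter i;

	iov_iter_kvec(&i, WRITE, kv, nr_segs, len);
	/* copy_from_iter_full() copies all @len bytes or reports failure */
	return copy_from_iter_full(dst, len, &i) ? 0 : -EFAULT;
}
#endif
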
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on the stricter semantics of
 * copy_from_iter_flushcache() relative to copy_from_iter_nocache() must
 * check for IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before
 * assuming that the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_to_iter_mcsafe _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter_mcsafe(addr, bytes, i);
}

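/*
 * Sketch of the pattern the flushcache comment above describes: a pmem-style
 * writer may only rely on flush-on-return when the architecture provides it,
 * and must otherwise flush the destination itself. Hypothetical example;
 * example_flush_range() stands in for an explicit cache flush.
 */
#if 0	/* example only, never compiled */
static size_t example_copy_to_pmem(void *pmem_dst, size_t bytes,
				   struct iov_iter *i)
{
	size_t copied = copy_from_iter_flushcache(pmem_dst, bytes, i);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
		example_flush_range(pmem_dst, copied);	/* hypothetical */
	return copied;
}
#endif
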
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
		   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
		   size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			   size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
				 size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit on it. Passing a value greater
 * than the amount of data in the iov_iter is fine - it'll just do nothing
 * in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - the comparison extends both
	 * operands to u64 here, and any value that would be truncated by
	 * the conversion in the assignment is by definition greater than
	 * all values of size_t, including the old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * Reexpand a previously truncated iterator; the new count must not exceed
 * what the iterator held before it was truncated.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
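
/*
 * Sketch of the usual truncate/reexpand pairing: cap the iterator to what
 * this call may consume, do the work, then restore the hidden remainder so
 * the caller sees the right residual count. Hypothetical example;
 * example_do_op() stands in for any routine that advances the iterator.
 */
#if 0	/* example only, never compiled */
static ssize_t example_bounded_op(struct iov_iter *i, size_t limit)
{
	size_t count = iov_iter_count(i);
	ssize_t ret;

	if (limit >= count)
		return example_do_op(i);	/* nothing was hidden */

	iov_iter_truncate(i, limit);
	ret = example_do_op(i);			/* advances the iterator */
	/* restore the (count - limit) bytes hidden by the truncation */
	iov_iter_reexpand(i, iov_iter_count(i) + (count - limit));
	return ret;
}
#endif
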
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
			     struct iov_iter *i);

ssize_t import_iovec(int type, const struct iovec __user *uvector,
		     unsigned nr_segs, unsigned fast_segs,
		     struct iovec **iov, struct iov_iter *i);

#ifdef CONFIG_COMPAT
struct compat_iovec;
ssize_t compat_import_iovec(int type, const struct compat_iovec __user *uvector,
			    unsigned nr_segs, unsigned fast_segs,
			    struct iovec **iov, struct iov_iter *i);
#endif

int import_single_range(int type, void __user *buf, size_t len,
			struct iovec *iov, struct iov_iter *i);

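/*
 * Sketch of the usual import_iovec() calling convention: an on-stack
 * fast-path array, a pointer that import_iovec() may repoint at a heap
 * allocation, and a kfree() when done (assumes <linux/slab.h> for kfree()).
 * On success the return value is the total byte count in the iterator.
 * example_do_writev() is a hypothetical write-style handler.
 */
#if 0	/* example only, never compiled */
static ssize_t example_do_writev(const struct iovec __user *uvec,
				 unsigned long nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(WRITE, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;	/* *iov was freed and NULLed on failure */
	/* ... consume &iter ... */
	kfree(iov);		/* NULL when the on-stack array was used */
	return ret;
}
#endif
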
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context);

#endif