/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <crypto/hash.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC = 4,
	ITER_KVEC = 8,
	ITER_BVEC = 16,
	ITER_PIPE = 32,
	ITER_DISCARD = 64,
};

struct iov_iter {
	/*
	 * Bit 0 is the read/write bit, set if we're writing.
	 * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
	 * the caller isn't expecting to drop a page reference when done.
	 */
	unsigned int type;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			int idx;
			int start_idx;
		};
	};
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->type & ~(READ | WRITE);
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->type & (READ | WRITE);
}
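
/*
 * Illustrative sketch (not part of the original header): the type word packs
 * the data direction into bit 0 and the iterator flavour into the higher
 * bits, so the two properties are queried independently through the helpers
 * above rather than by poking at i->type directly.  For example:
 *
 *	if (iov_iter_rw(iter) == WRITE && iter_is_iovec(iter)) {
 *		// the data to be consumed lives in userspace iovecs
 *	}
 */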

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}
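
/*
 * Illustrative sketch (not part of the original header), assuming an
 * ITER_IOVEC iterator and a hypothetical per-segment helper: walk the
 * iterator one userspace segment at a time, advancing by however much the
 * helper actually consumed.
 *
 *	while (iov_iter_count(iter)) {
 *		struct iovec v = iov_iter_iovec(iter);
 *		ssize_t n = example_consume_user(v.iov_base, v.iov_len);
 *		if (n <= 0)
 *			break;
 *		iov_iter_advance(iter, n);
 *	}
 */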

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full_nocache(addr, bytes, i);
}
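
/*
 * Usage sketch (illustrative, not part of the original header): pulling a
 * fixed-size header out of an iterator, e.g. in a ->write_iter()-style path
 * where 'iter' describes the caller's data.  'struct example_hdr' is a
 * hypothetical type.
 *
 *	struct example_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *		return -EFAULT;
 *	// on success the iterator has advanced past the header bytes
 */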

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on copy_from_iter_flushcache() having
 * stricter semantics than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
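
/*
 * Illustrative sketch (not part of the original header): a pmem-style caller
 * honouring the note above.  example_flush_range() is a hypothetical stand-in
 * for whatever cache-flushing mechanism the driver uses when the architecture
 * cannot flush as part of the copy itself.
 *
 *	size_t copied = copy_from_iter_flushcache(dst, bytes, iter);
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		example_flush_range(dst, copied);
 */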

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_to_iter_mcsafe _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter_mcsafe(addr, bytes, i);
}
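
/*
 * Illustrative sketch (not part of the original header): reading out of
 * memory that may contain poison (e.g. pmem).  A short return value means
 * the copy could not complete - for example a machine-check on the source -
 * so the caller decides what to do with the remainder; -EIO here is a
 * hypothetical error policy.
 *
 *	size_t copied = copy_to_iter_mcsafe(src, len, iter);
 *
 *	if (copied != len)
 *		return -EIO;
 */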

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
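
/*
 * Construction sketch (illustrative, not part of the original header):
 * describing a single kernel buffer as an iterator that a read-style
 * operation will fill.  'buf' and 'len' are hypothetical.
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 *	// 'iter' now covers 'len' bytes of kernel memory to be filled
 */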
Kent Overstreet | 9223687 | 2013-11-27 16:29:46 -0800 | [diff] [blame] | 230 | |
Al Viro | 4b8164b | 2015-01-31 20:08:47 -0500 | [diff] [blame] | 231 | const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); |
| 232 | |
Al Viro | b57332b | 2016-10-10 13:57:37 -0400 | [diff] [blame] | 233 | static inline size_t iov_iter_count(const struct iov_iter *i) |
Kent Overstreet | 9223687 | 2013-11-27 16:29:46 -0800 | [diff] [blame] | 234 | { |
| 235 | return i->count; |
| 236 | } |
| 237 | |
Omar Sandoval | bd8e0ff | 2015-03-17 14:04:02 -0700 | [diff] [blame] | 238 | /* |
Al Viro | 0b86dbf | 2014-06-23 08:44:40 +0100 | [diff] [blame] | 239 | * Cap the iov_iter by given limit; note that the second argument is |
| 240 | * *not* the new size - it's upper limit for such. Passing it a value |
| 241 | * greater than the amount of data in iov_iter is fine - it'll just do |
| 242 | * nothing in that case. |
| 243 | */ |
| 244 | static inline void iov_iter_truncate(struct iov_iter *i, u64 count) |
Al Viro | 0c94933 | 2014-03-22 06:51:37 -0400 | [diff] [blame] | 245 | { |
Al Viro | 0b86dbf | 2014-06-23 08:44:40 +0100 | [diff] [blame] | 246 | /* |
| 247 | * count doesn't have to fit in size_t - comparison extends both |
| 248 | * operands to u64 here and any value that would be truncated by |
| 249 | * conversion in assignement is by definition greater than all |
| 250 | * values of size_t, including old i->count. |
| 251 | */ |
Al Viro | 0c94933 | 2014-03-22 06:51:37 -0400 | [diff] [blame] | 252 | if (i->count > count) |
| 253 | i->count = count; |
| 254 | } |
| 255 | |
Al Viro | b42b15f | 2014-04-04 12:15:19 -0400 | [diff] [blame] | 256 | /* |
| 257 | * reexpand a previously truncated iterator; count must be no more than how much |
| 258 | * we had shrunk it. |
| 259 | */ |
| 260 | static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) |
| 261 | { |
| 262 | i->count = count; |
| 263 | } |
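
/*
 * Pairing sketch (illustrative, not part of the original header): bound an
 * operation to 'limit' bytes, then restore whatever the cap hid once the
 * bounded work is done.  example_consume() is a hypothetical helper that may
 * advance the iterator.
 *
 *	size_t old_count = iov_iter_count(iter);
 *	size_t hidden;
 *
 *	iov_iter_truncate(iter, limit);
 *	hidden = old_count - iov_iter_count(iter);
 *
 *	example_consume(iter);
 *
 *	iov_iter_reexpand(iter, iov_iter_count(iter) + hidden);
 */
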
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);
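
/*
 * Usage sketch (illustrative, not part of the original header): copying
 * payload out of an iterator while accumulating an Internet-style partial
 * checksum, as networking send paths do.  'dst' and 'len' are hypothetical.
 *
 *	__wsum csum = 0;
 *
 *	if (!csum_and_copy_from_iter_full(dst, len, &csum, iter))
 *		return -EFAULT;
 *	// 'csum' now holds the (unfolded) checksum of the copied bytes
 */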

ssize_t import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);

#ifdef CONFIG_COMPAT
struct compat_iovec;
ssize_t compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);
#endif

int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);
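
/*
 * Usage sketch (illustrative, not part of the original header), mirroring the
 * common readv/writev pattern: start from a small on-stack array and let
 * import_iovec() switch to a heap allocation when the caller passes more
 * segments than fit.  'uvec' and 'nr_segs' are hypothetical syscall arguments.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack), &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... use 'iter'; 'ret' is the total number of bytes it describes ...
 *	kfree(iov);	// NULL when the on-stack array was used, so always safe
 */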

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context);
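
/*
 * Callback sketch (illustrative, not part of the original header), assuming a
 * kernel-backed (kvec or bvec) iterator: the callback is handed each
 * contiguous range as a kvec, and the iterator itself is not advanced.
 *
 *	static int example_count_range(struct kvec *vec, void *context)
 *	{
 *		*(size_t *)context += vec->iov_len;
 *		return 0;
 *	}
 *
 *	size_t seen = 0;
 *
 *	iov_iter_for_each_range(iter, iov_iter_count(iter), example_count_range, &seen);
 */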

#endif