/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
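
/*
 * Example (illustrative sketch, not part of the original file): walking a
 * possibly-chained scatterlist with sg_next() by hand. In real code the
 * for_each_sg() helper from <linux/scatterlist.h> is preferred; this just
 * shows what it boils down to. The list 'sgl' and the hypothetical helper
 * name are assumptions for illustration.
 */
static inline unsigned int sg_walk_example(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int total = 0;

	/* sg_next() transparently follows chain entries and returns NULL
	 * once the entry marked with sg_mark_end() has been visited */
	for (sg = sgl; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}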

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into
 *   account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);
| 55 | |
Tom Lendacky | cfaed10 | 2015-06-01 11:15:25 -0500 | [diff] [blame] | 56 | /** |
| 57 | * sg_nents_for_len - return total count of entries in scatterlist |
| 58 | * needed to satisfy the supplied length |
| 59 | * @sg: The scatterlist |
| 60 | * @len: The total required length |
| 61 | * |
| 62 | * Description: |
| 63 | * Determines the number of entries in sg that are required to meet |
| 64 | * the supplied length, taking into acount chaining as well |
| 65 | * |
| 66 | * Returns: |
| 67 | * the number of sg entries needed, negative error on failure |
| 68 | * |
| 69 | **/ |
| 70 | int sg_nents_for_len(struct scatterlist *sg, u64 len) |
| 71 | { |
| 72 | int nents; |
| 73 | u64 total; |
| 74 | |
| 75 | if (!len) |
| 76 | return 0; |
| 77 | |
| 78 | for (nents = 0, total = 0; sg; sg = sg_next(sg)) { |
| 79 | nents++; |
| 80 | total += sg->length; |
| 81 | if (total >= len) |
| 82 | return nents; |
| 83 | } |
| 84 | |
| 85 | return -EINVAL; |
| 86 | } |
| 87 | EXPORT_SYMBOL(sg_nents_for_len); |
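
/*
 * Example (illustrative sketch, not part of the original file): using
 * sg_nents_for_len() to find how many entries cover the first 'len'
 * bytes, e.g. before handing only that prefix of the list to a driver.
 * The helper name and pr_debug() output are assumptions for illustration.
 */
static inline int sg_prefix_example(struct scatterlist *sgl, u64 len)
{
	struct scatterlist *sg;
	int i, nents;

	nents = sg_nents_for_len(sgl, len);
	if (nents < 0)
		return nents;	/* list holds fewer than 'len' bytes */

	/* only the first 'nents' entries are needed to cover 'len' bytes;
	 * note that the last of them may extend beyond 'len' */
	for_each_sg(sgl, sg, nents, i)
		pr_debug("entry %d: %u bytes\n", i, sg->length);

	return nents;
}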

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

	BUG_ON(!sg_is_last(ret));
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	The SG table
 * @nents:	Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		SG entry
 * @buf:	Virtual address for IO
 * @buflen:	IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
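
/*
 * Example (illustrative sketch, not part of the original file): wrapping a
 * kmalloc'ed buffer in a single-entry scatterlist, the common pattern for
 * feeding a linear buffer to an interface that takes an sg list. The helper
 * name is an assumption for illustration.
 */
static inline void sg_init_one_example(void *buf, unsigned int len)
{
	struct scatterlist sg;

	/* buf must be addressable via virt_to_page(), i.e. kmalloc'ed
	 * memory rather than vmalloc or stack memory */
	sg_init_one(&sg, buf, len);

	/* &sg can now be passed wherever a one-entry list is accepted */
}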

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc_array(nents, sizeof(struct scatterlist),
				     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated first chunk
 * @free_fn:	Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     unsigned int nents_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > curr_max_ents) {
			next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (nents_first_chunk)
			nents_first_chunk = 0;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
		curr_max_ents = max_ents;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: Preallocated first scatterlist chunk, or %NULL
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated chunk provided by user
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     unsigned int nents_first_chunk, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;
	unsigned prv_max_ents;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > curr_max_ents) {
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, prv_max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
		prv_max_ents = curr_max_ents;
		curr_max_ents = max_ents;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, 0, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
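
/*
 * Example (illustrative sketch, not part of the original file): allocating
 * a table, populating it with pages, and freeing it again. The page array,
 * its length and the helper name are assumptions; error handling is kept
 * minimal.
 */
static inline int sg_table_example(struct page **pages, unsigned int n,
				   gfp_t gfp)
{
	struct sg_table table;
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(&table, n, gfp);
	if (ret)
		return ret;

	/* one full page per entry; chaining is handled transparently */
	for_each_sg(table.sgl, sg, table.nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... hand table.sgl / table.nents to a driver or the DMA layer ... */

	sg_free_table(&table);
	return 0;
}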

/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *				 an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node up to the
 *   maximum size specified in @max_segment. A user may provide an offset at
 *   the start and a size of valid data in a buffer specified by the page
 *   array. The returned sg table is released by sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i;
	int ret;
	struct scatterlist *s;

	if (WARN_ON(!max_segment || offset_in_page(max_segment)))
		return -EINVAL;

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset at the start and a size of valid data in a buffer
 *   specified by the page array. The returned sg table is released by
 *   sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
					   SCATTERLIST_MAX_SEGMENT, gfp_mask);
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
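
/*
 * Example (illustrative sketch, not part of the original file): building a
 * table for a pinned user buffer. The pages are assumed to have been
 * obtained elsewhere (e.g. via get_user_pages()); contiguous pages collapse
 * into one entry, so sgt->nents may end up far below n_pages. The helper
 * name is an assumption for illustration.
 */
static inline int sg_from_pages_example(struct sg_table *sgt,
					struct page **pages,
					unsigned int n_pages,
					unsigned long nbytes)
{
	/* buffer starts at offset 0 of the first page */
	return sg_alloc_table_from_pages(sgt, pages, n_pages, 0,
					 nbytes, GFP_KERNEL);
}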

#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    (gfp & ~GFP_DMA) | __GFP_ZERO);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free(sgl);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %lld\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);
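
/*
 * Example (illustrative sketch, not part of the original file): allocating
 * a scatterlist backed by fresh pages with sgl_alloc() and releasing it
 * with sgl_free(). Useful when no preexisting buffer is being mapped, e.g.
 * for a target-mode driver staging data. The helper name is an assumption.
 */
static inline void sgl_alloc_example(void)
{
	unsigned int nents;
	struct scatterlist *sgl;

	sgl = sgl_alloc(65536, GFP_KERNEL, &nents);	/* 16 entries with 4 KiB pages */
	if (!sgl)
		return;

	/* ... fill the pages, e.g. via sg_copy_from_buffer() ... */

	sgl_free(sgl);	/* frees both the pages and the list itself */
}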

/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid
 *   freeing a page twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
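
/*
 * Example (illustrative sketch, not part of the original file): visiting
 * every page referenced by a scatterlist with the for_each_sg_page()
 * helper, which drives __sg_page_iter_start()/__sg_page_iter_next() above.
 * Entries spanning several pages are visited once per page. The helper
 * name is an assumption for illustration.
 */
static inline unsigned int sg_page_iter_example(struct scatterlist *sgl,
						unsigned int nents)
{
	struct sg_page_iter piter;
	unsigned int pages = 0;

	for_each_sg_page(sgl, &piter, nents, 0) {
		struct page *page = sg_page_iter_page(&piter);

		(void)page;	/* e.g. set_page_dirty(page) */
		pages++;
	}

	return pages;
}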

static int sg_dma_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
	struct sg_page_iter *piter = &dma_iter->base;

	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);
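
/*
 * Example (illustrative sketch, not part of the original file): walking the
 * DMA-mapped side of a list with for_each_sg_dma_page(), which drives
 * __sg_page_iter_dma_next() above. Note that the iteration count is the
 * value returned by dma_map_sg(), not the original nents. The helper name
 * is an assumption for illustration.
 */
static inline void sg_dma_page_iter_example(struct scatterlist *sgl,
					    int dma_nents)
{
	struct sg_dma_page_iter dma_iter;

	for_each_sg_dma_page(sgl, &dma_iter, dma_nents, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);

		(void)addr;	/* e.g. program a device page table */
	}
}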

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: SG_MITER_* flags controlling the iteration
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				     (pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance the current location by
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If mapping iterator @miter has been advanced by sg_miter_next(), this
 *   stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped, or not advanced yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining and __offset are adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
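
/*
 * Example (illustrative sketch, not part of the original file): the
 * canonical start/next/stop pattern, here summing the bytes of a list
 * while it is mapped page by page. SG_MITER_ATOMIC makes the mapping
 * usable in atomic context, at the cost of not sleeping between next and
 * stop. The helper name is an assumption for illustration.
 */
static inline size_t sg_miter_example(struct scatterlist *sgl,
				      unsigned int nents)
{
	struct sg_mapping_iter miter;
	size_t total = 0;

	sg_miter_start(&miter, sgl, nents,
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	while (sg_miter_next(&miter)) {
		/* miter.addr is a kernel mapping of at most one page */
		total += miter.length;
		/* miter.consumed may be reduced to take a partial step */
	}

	sg_miter_stop(&miter);	/* drops the last kmap, if any */
	return total;
}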

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 * @to_buffer:		 transfer direction (true == from an sg list to a
 *			 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
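
/*
 * Example (illustrative sketch, not part of the original file): bouncing
 * data through a linear buffer. Both helpers return the number of bytes
 * actually copied, which is capped by whichever of the buffer and the sg
 * list is shorter. The helper name is an assumption for illustration.
 */
static inline void sg_copy_example(struct scatterlist *sgl,
				   unsigned int nents,
				   void *buf, size_t buflen)
{
	size_t copied;

	/* stage the buffer's contents into the sg pages */
	copied = sg_copy_from_buffer(sgl, nents, buf, buflen);

	/* ... device or CPU modifies the sg pages ... */

	/* and read them back out again */
	copied = sg_copy_to_buffer(sgl, nents, buf, buflen);
	(void)copied;
}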

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);

/**
 * sg_zero_buffer - Zero-out a part of an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buflen:		 The number of bytes to zero out
 * @skip:		 Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		      size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);