// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
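
/*
 * Example (illustrative sketch, not part of the original source): walking a
 * possibly-chained list with sg_next() instead of plain pointer arithmetic;
 * dump_sgl() is a hypothetical helper name.
 *
 *	static void dump_sgl(struct scatterlist *sgl)
 *	{
 *		struct scatterlist *sg;
 *
 *		for (sg = sgl; sg; sg = sg_next(sg))
 *			pr_info("entry: len=%u off=%u\n",
 *				sg->length, sg->offset);
 *	}
 */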

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg: The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg: The scatterlist
 * @len: The total required length
 *
 * Description:
 *   Determines the number of entries in @sg that are required to meet
 *   the supplied length, taking chaining into account.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
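
/*
 * Example (sketch, caller names are hypothetical): sizing a partial mapping,
 * e.g. when only the first 4 KiB of a request needs to be handed to hardware.
 * Assumes <linux/sizes.h> for SZ_4K.
 *
 *	int nents = sg_nents_for_len(sgl, SZ_4K);
 *
 *	if (nents < 0)
 *		return nents;	// list is shorter than 4 KiB
 *	// map/process only the first 'nents' entries
 */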

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl: First entry in the scatterlist
 * @nents: Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used sparingly: it (currently) scans the entire list
 *   to find the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one;
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

	BUG_ON(!sg_is_last(ret));
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl: The SG table
 * @nents: Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg: SG entry
 * @buf: Virtual address for IO
 * @buflen: IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
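
/*
 * Example (sketch): describing a kmalloc()ed buffer as a one-entry list
 * before handing it to an API that consumes scatterlists.  Note that the
 * buffer must lie in the kernel's linear mapping (not vmalloc() memory),
 * since sg_set_buf() translates the virtual address to a page.
 *
 *	struct scatterlist sg;
 *	void *buf = kmalloc(512, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	sg_init_one(&sg, buf, 512);
 *	// ... pass (&sg, 1) to the consumer, then kfree(buf) ...
 */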

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);

		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc_array(nents, sizeof(struct scatterlist),
				     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously allocated sg table
 * @table: The sg table header to use
 * @max_ents: The maximum number of entries per single scatterlist
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated first chunk
 * @free_fn: Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table().  The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     unsigned int nents_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > curr_max_ents) {
			next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (nents_first_chunk)
			nents_first_chunk = 0;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
		curr_max_ents = max_ents;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table: The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
 * @first_chunk: Preallocated first scatterlist chunk, or NULL if none
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated chunk provided by user
 * @gfp_mask: GFP allocation mask
 * @alloc_fn: Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     unsigned int nents_first_chunk, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;
	unsigned prv_max_ents;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > curr_max_ents) {
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, prv_max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
		prv_max_ents = curr_max_ents;
		curr_max_ents = max_ents;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, 0, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
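
/*
 * Example (sketch, 'pages' and 'npages' assumed to exist): allocate a table,
 * point each entry at one page, use it, then release it.
 *
 *	struct sg_table table;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *	int ret;
 *
 *	ret = sg_alloc_table(&table, npages, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	for_each_sg(table.sgl, sg, table.nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *	// ... e.g. dma_map_sg()/dma_unmap_sg() against table.sgl ...
 *	sg_free_table(&table);
 */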

static struct scatterlist *get_next_sg(struct sg_table *table,
				       struct scatterlist *cur,
				       unsigned long needed_sges,
				       gfp_t gfp_mask)
{
	struct scatterlist *new_sg, *next_sg;
	unsigned int alloc_size;

	if (cur) {
		next_sg = sg_next(cur);
		/* Check if the last entry should be kept for chaining */
		if (!sg_is_last(next_sg) || needed_sges == 1)
			return next_sg;
	}

	alloc_size = min_t(unsigned long, needed_sges, SG_MAX_SINGLE_ALLOC);
	new_sg = sg_kmalloc(alloc_size, gfp_mask);
	if (!new_sg)
		return ERR_PTR(-ENOMEM);
	sg_init_table(new_sg, alloc_size);
	if (cur) {
		__sg_chain(next_sg, new_sg);
		table->orig_nents += alloc_size - 1;
	} else {
		table->sgl = new_sg;
		table->orig_nents = alloc_size;
		table->nents = 0;
	}
	return new_sg;
}

/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *				 an array of pages
 * @sgt: The sg table header to use
 * @pages: Pointer to an array of page pointers
 * @n_pages: Number of pages in the pages array
 * @offset: Offset from start of the first page to the start of a buffer
 * @size: Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist element in bytes
 * @prv: Last populated sge in sgt
 * @left_pages: Number of pages the caller still has to add after this call
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   If @prv is NULL, allocate and initialize an sg table from a list of
 *   pages, otherwise reuse the scatterlist passed in at @prv.
 *   Contiguous ranges of the pages are squashed into a single scatterlist
 *   entry up to the maximum size specified in @max_segment.  A user may
 *   provide an offset at a start and a size of valid data in a buffer
 *   specified by the page array.
 *
 * Returns:
 *   Last SGE in @sgt on success, an ERR_PTR() on failure.
 *   The allocation in @sgt must be released by sg_free_table().
 *
 * Notes:
 *   If this function returns an error, the caller must call
 *   sg_free_table() to clean up any leftover allocations.
 */
struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
		struct page **pages, unsigned int n_pages, unsigned int offset,
		unsigned long size, unsigned int max_segment,
		struct scatterlist *prv, unsigned int left_pages,
		gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
	unsigned int added_nents = 0;
	struct scatterlist *s = prv;

	/*
	 * The algorithm below requires max_segment to be aligned to PAGE_SIZE
	 * otherwise it can overshoot.
	 */
	max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
	if (WARN_ON(max_segment < PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && prv)
		return ERR_PTR(-EOPNOTSUPP);

	if (prv) {
		unsigned long paddr = (page_to_pfn(sg_page(prv)) * PAGE_SIZE +
				       prv->offset + prv->length) /
				      PAGE_SIZE;

		if (WARN_ON(offset))
			return ERR_PTR(-EINVAL);

		/* Merge contiguous pages into the last SG */
		prv_len = prv->length;
		while (n_pages && page_to_pfn(pages[0]) == paddr) {
			if (prv->length + PAGE_SIZE > max_segment)
				break;
			prv->length += PAGE_SIZE;
			paddr++;
			pages++;
			n_pages--;
		}
		if (!n_pages)
			goto out;
	}

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for (i = 0; i < chunks; i++) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		/* Pass how many chunks might be left */
		s = get_next_sg(sgt, s, chunks - i + left_pages, gfp_mask);
		if (IS_ERR(s)) {
			/*
			 * Adjust entry length to be as before function was
			 * called.
			 */
			if (prv)
				prv->length = prv_len;
			return s;
		}
		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		added_nents++;
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}
	sgt->nents += added_nents;
out:
	if (!left_pages)
		sg_mark_end(s);
	return s;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt: The sg table header to use
 * @pages: Pointer to an array of page pointers
 * @n_pages: Number of pages in the pages array
 * @offset: Offset from start of the first page to the start of a buffer
 * @size: Number of valid bytes in the buffer (after offset)
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages.  Contiguous
 *   ranges of the pages are squashed into a single scatterlist node.  A user
 *   may provide an offset at a start and a size of valid data in a buffer
 *   specified by the page array.  The returned sg table is released by
 *   sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages,
			offset, size, UINT_MAX, NULL, 0, gfp_mask));
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
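
/*
 * Example (sketch): the same kind of table built directly from a page array;
 * physically contiguous neighbours are merged into single entries.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 0,
 *					(unsigned long)n_pages << PAGE_SHIFT,
 *					GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	// ... use sgt.sgl ...
 *	sg_free_table(&sgt);
 */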

#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    (gfp & ~GFP_DMA) | __GFP_ZERO);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free(sgl);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %lld\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);
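
/*
 * Example (sketch): backing a 1 MiB transfer with newly allocated pages and
 * releasing both the list and the pages afterwards.  Assumes <linux/sizes.h>
 * for SZ_1M.
 *
 *	unsigned int nents;
 *	struct scatterlist *sgl;
 *
 *	sgl = sgl_alloc(SZ_1M, GFP_KERNEL, &nents);
 *	if (!sgl)
 *		return -ENOMEM;
 *	// ... hand (sgl, nents) to the device ...
 *	sgl_free(sgl);
 */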

/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid
 *   freeing a page twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
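
/*
 * Example (sketch): the page iterator is normally driven through the
 * for_each_sg_page() macro from <linux/scatterlist.h> rather than by
 * calling __sg_page_iter_next() directly.
 *
 *	struct sg_page_iter piter;
 *
 *	for_each_sg_page(sgl, &piter, nents, 0) {
 *		struct page *page = sg_page_iter_page(&piter);
 *
 *		// ... operate on one page at a time ...
 *	}
 */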

static int sg_dma_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
	struct sg_page_iter *piter = &dma_iter->base;

	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: SG_MITER_* flags controlling the iteration
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;

		miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
		miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
		miter->__offset &= PAGE_SIZE - 1;
		miter->__remaining = sg->offset + sg->length -
				     (miter->piter.sg_pgoffset << PAGE_SHIFT) -
				     miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been advanced by sg_miter_next(),
 *   this stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped or has not been advanced yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
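
/*
 * Example (sketch): checksumming an sg list via the mapping iterator; the
 * kmap/kunmap pairing is handled by sg_miter_next()/sg_miter_stop().
 * Assumes <linux/crc32.h>.
 *
 *	struct sg_mapping_iter miter;
 *	u32 crc = ~0;
 *
 *	sg_miter_start(&miter, sgl, nents,
 *		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 *	while (sg_miter_next(&miter))
 *		crc = crc32(crc, miter.addr, miter.length);
 *	sg_miter_stop(&miter);
 */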

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 * @to_buffer: transfer direction (true == from an sg list to a
 *	buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
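
/*
 * Example (sketch): draining an sg list into a contiguous bounce buffer;
 * the return value may be less than 'buflen' if the list is shorter.
 *
 *	void *bounce = kmalloc(buflen, GFP_KERNEL);
 *	size_t copied;
 *
 *	if (!bounce)
 *		return -ENOMEM;
 *	copied = sg_copy_to_buffer(sgl, sg_nents(sgl), bounce, buflen);
 */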

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
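
/*
 * Example (sketch): peeking at 16 bytes that live 64 bytes into the list,
 * e.g. a header in the middle of a buffer.
 *
 *	u8 hdr[16];
 *
 *	if (sg_pcopy_to_buffer(sgl, sg_nents(sgl), hdr,
 *			       sizeof(hdr), 64) != sizeof(hdr))
 *		return -EIO;
 */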

/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buflen: The number of bytes to zero out
 * @skip: Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		      size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);