// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
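
/*
 * Example (illustrative sketch only): walking a possibly chained list by
 * hand with sg_next().  Most callers use the for_each_sg() helper, which
 * performs the same traversal.  process_entry() is a hypothetical callback.
 *
 *	struct scatterlist *sg;
 *
 *	for (sg = sgl; sg; sg = sg_next(sg))
 *		process_entry(sg_page(sg), sg->offset, sg->length);
 */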

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into
 *   account as well.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 *   Determines the number of entries in sg that are required to meet
 *   the supplied length, taking chaining into account as well.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
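
/*
 * Example (illustrative sketch only): a driver that will program at most
 * @len bytes can size its descriptor ring from the return value.  The
 * variable names below are hypothetical.
 *
 *	int nents = sg_nents_for_len(sgl, len);
 *
 *	if (nents < 0)
 *		return nents;
 *	nr_descriptors = nents;
 */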

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

	BUG_ON(!sg_is_last(ret));
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	The SG table
 * @nents:	Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		SG entry
 * @buf:	Virtual address for IO
 * @buflen:	IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
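
/*
 * Example (illustrative sketch only): wrapping a single kmalloc'ed buffer
 * for a one-off transfer.  cmd_buf, cmd_len, dev and nents are hypothetical
 * caller-side names.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, cmd_buf, cmd_len);
 *	nents = dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE);
 */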

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc_array(nents, sizeof(struct scatterlist),
				     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated first chunk
 * @free_fn:	Free function
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     unsigned int nents_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > curr_max_ents) {
			next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (nents_first_chunk)
			nents_first_chunk = 0;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
		curr_max_ents = max_ents;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: Preallocated first scatterlist chunk, or %NULL if none
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated chunk provided by user
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     unsigned int nents_first_chunk, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;
	unsigned prv_max_ents;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > curr_max_ents) {
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, prv_max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
		prv_max_ents = curr_max_ents;
		curr_max_ents = max_ents;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, 0, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
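
/*
 * Example (illustrative sketch only): the usual allocate/fill/free cycle
 * for a caller with nr_bufs kernel buffers.  nr_bufs, bufs[] and buf_len
 * are hypothetical caller-side names; sg_alloc_table() already marks the
 * last entry, so no explicit sg_mark_end() is needed here.
 *
 *	struct sg_table table;
 *	struct scatterlist *sg;
 *	int i, ret;
 *
 *	ret = sg_alloc_table(&table, nr_bufs, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	for_each_sg(table.sgl, sg, table.nents, i)
 *		sg_set_buf(sg, bufs[i], buf_len);
 *	...
 *	sg_free_table(&table);
 */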

/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *				 an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node up to the
 *    maximum size specified in @max_segment. A user may provide an offset at
 *    the start and a size of valid data in a buffer specified by the page
 *    array. The returned sg table is released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i;
	int ret;
	struct scatterlist *s;

	if (WARN_ON(!max_segment || offset_in_page(max_segment)))
		return -EINVAL;

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node. A user
 *    may provide an offset at the start and a size of valid data in a buffer
 *    specified by the page array. The returned sg table is released by
 *    sg_free_table.
 *
 *  Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
					   SCATTERLIST_MAX_SEGMENT, gfp_mask);
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
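
/*
 * Example (illustrative sketch only): building a table from pages that the
 * caller has already pinned (e.g. with get_user_pages()), then releasing
 * it.  sgt, pages, n_pages, offset and nr_bytes are hypothetical names.
 *
 *	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, offset,
 *					nr_bytes, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...
 *	sg_free_table(&sgt);
 */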

#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    (gfp & ~GFP_DMA) | __GFP_ZERO);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free(sgl);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %lld\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);

/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set @nents correctly to avoid
 *   freeing a page twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);
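
/*
 * Example (illustrative sketch only, requires CONFIG_SGL_ALLOC): allocating
 * backing pages for a 1 MiB transfer and freeing them again.  The local
 * names are hypothetical.
 *
 *	unsigned int nents;
 *	struct scatterlist *sgl;
 *
 *	sgl = sgl_alloc(SZ_1M, GFP_KERNEL, &nents);
 *	if (!sgl)
 *		return -ENOMEM;
 *	...
 *	sgl_free(sgl);
 */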

#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
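
/*
 * Example (illustrative sketch only): visiting every page of a scatterlist
 * through the for_each_sg_page() wrapper, which drives
 * __sg_page_iter_start() and __sg_page_iter_next() internally.  sgl and
 * nents are hypothetical caller-side names.
 *
 *	struct sg_page_iter piter;
 *
 *	for_each_sg_page(sgl, &piter, nents, 0)
 *		set_page_dirty(sg_page_iter_page(&piter));
 */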

static int sg_dma_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
	struct sg_page_iter *piter = &dma_iter->base;

	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: SG_MITER_* flags (transfer direction, atomic mapping)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;

		miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
		miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
		miter->__offset &= PAGE_SIZE - 1;
		miter->__remaining = sg->offset + sg->length -
				     (miter->piter.sg_pgoffset << PAGE_SHIFT) -
				     miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If mapping iterator @miter has already been advanced by sg_miter_next(),
 *   this stops @miter first.
 *
 * Context:
 *   Don't care if @miter is stopped or has not been advanced yet.
 *   Otherwise, preemption disabled if SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining and __offset are adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
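
/*
 * Example (illustrative sketch only): a typical kmap-and-process loop over
 * a scatterlist using the mapping iterator.  consume() is a hypothetical
 * callback; with SG_MITER_ATOMIC it must not sleep before sg_miter_stop().
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sgl, nents,
 *		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 *	while (sg_miter_next(&miter))
 *		consume(miter.addr, miter.length);
 *	sg_miter_stop(&miter);
 */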

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 * @to_buffer:		 transfer direction (true == from an sg list to a
 *			 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);
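
/*
 * Example (illustrative sketch only): the convenience wrappers below cover
 * the common directions and skip offsets; bounce, bounce_len, hdr, hdr_len
 * and hdr_offset are hypothetical caller-side names.
 *
 *	copied = sg_copy_to_buffer(sgl, nents, bounce, bounce_len);
 *	...
 *	copied = sg_pcopy_from_buffer(sgl, nents, hdr, hdr_len, hdr_offset);
 */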

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);

/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buflen:		 The number of bytes to zero out
 * @skip:		 Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		       size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);