/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
        if (sg_is_last(sg))
                return NULL;

        sg++;
        if (unlikely(sg_is_chain(sg)))
                sg = sg_chain_ptr(sg);

        return sg;
}
EXPORT_SYMBOL(sg_next);
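
/*
 * Usage sketch (illustrative, not part of the original file): on a chained
 * list, plain sg++ walks off the end of a chunk, so traversal should go
 * through sg_next() or the for_each_sg() helper. "table" is a hypothetical,
 * already-populated sg_table.
 *
 *      struct scatterlist *sg;
 *
 *      for (sg = table->sgl; sg; sg = sg_next(sg))
 *              pr_debug("segment: %u bytes\n", sg->length);
 */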

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg: The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
        int nents;
        for (nents = 0; sg; sg = sg_next(sg))
                nents++;
        return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg: The scatterlist
 * @len: The total required length
 *
 * Description:
 *   Determines the number of entries in @sg that are required to meet
 *   the supplied length, taking chaining into account.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
        int nents;
        u64 total;

        if (!len)
                return 0;

        for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
                nents++;
                total += sg->length;
                if (total >= len)
                        return nents;
        }

        return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
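
/*
 * Usage sketch (illustrative only): bounding a DMA mapping to the first
 * "len" bytes of a list. The names "dev", "sgl" and "len" are hypothetical;
 * a negative return means the list is shorter than "len".
 *
 *      int nents = sg_nents_for_len(sgl, len);
 *
 *      if (nents < 0)
 *              return nents;
 *      nents = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */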

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl: First entry in the scatterlist
 * @nents: Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used sparingly, as it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
        struct scatterlist *sg, *ret = NULL;
        unsigned int i;

        for_each_sg(sgl, sg, nents, i)
                ret = sg;

        BUG_ON(!sg_is_last(ret));
        return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl: The SG table
 * @nents: Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
        memset(sgl, 0, sizeof(*sgl) * nents);
        sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg: SG entry
 * @buf: Virtual address for IO
 * @buflen: IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
        sg_init_table(sg, 1);
        sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
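
/*
 * Usage sketch (illustrative only): wrapping a kmalloc'ed buffer in a
 * one-entry list for an API that consumes scatterlists. "buf" and "len"
 * are hypothetical.
 *
 *      struct scatterlist sg;
 *
 *      sg_init_one(&sg, buf, len);
 *
 * After this, &sg is a fully initialized, terminated single-entry list.
 */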

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                /*
                 * Kmemleak doesn't track page allocations as they are not
                 * commonly used (in a raw form) for kernel data structures.
                 * As we chain together a list of pages and then a normal
                 * kmalloc (tracked by kmemleak), in order for that last
                 * allocation not to become decoupled (and thus a
                 * false-positive) we need to inform kmemleak of all the
                 * intermediate allocations.
                 */
                void *ptr = (void *) __get_free_page(gfp_mask);
                kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
                return ptr;
        } else
                return kmalloc_array(nents, sizeof(struct scatterlist),
                                     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                kmemleak_free(sg);
                free_page((unsigned long) sg);
        } else
                kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table: The sg table header to use
 * @max_ents: The maximum number of entries per single scatterlist
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *      scatterlist chunk, 0 means no such preallocated first chunk
 * @free_fn: Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
                     unsigned int nents_first_chunk, sg_free_fn *free_fn)
{
        struct scatterlist *sgl, *next;
        unsigned curr_max_ents = nents_first_chunk ?: max_ents;

        if (unlikely(!table->sgl))
                return;

        sgl = table->sgl;
        while (table->orig_nents) {
                unsigned int alloc_size = table->orig_nents;
                unsigned int sg_size;

                /*
                 * If we have more than max_ents segments left,
                 * then assign 'next' to the sg table after the current one.
                 * sg_size is then one less than alloc size, since the last
                 * element is the chain pointer.
                 */
                if (alloc_size > curr_max_ents) {
                        next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
                        alloc_size = curr_max_ents;
                        sg_size = alloc_size - 1;
                } else {
                        sg_size = alloc_size;
                        next = NULL;
                }

                table->orig_nents -= sg_size;
                if (nents_first_chunk)
                        nents_first_chunk = 0;
                else
                        free_fn(sgl, alloc_size);
                sgl = next;
                curr_max_ents = max_ents;
        }

        table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table: The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
        __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
 * @first_chunk: Preallocated first scatterlist chunk, or NULL if none
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *      scatterlist chunk, 0 means no such preallocated chunk provided by user
 * @gfp_mask: GFP allocation mask
 * @alloc_fn: Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                     unsigned int max_ents, struct scatterlist *first_chunk,
                     unsigned int nents_first_chunk, gfp_t gfp_mask,
                     sg_alloc_fn *alloc_fn)
{
        struct scatterlist *sg, *prv;
        unsigned int left;
        unsigned curr_max_ents = nents_first_chunk ?: max_ents;
        unsigned prv_max_ents;

        memset(table, 0, sizeof(*table));

        if (nents == 0)
                return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
        if (WARN_ON_ONCE(nents > max_ents))
                return -EINVAL;
#endif

        left = nents;
        prv = NULL;
        do {
                unsigned int sg_size, alloc_size = left;

                if (alloc_size > curr_max_ents) {
                        alloc_size = curr_max_ents;
                        sg_size = alloc_size - 1;
                } else
                        sg_size = alloc_size;

                left -= sg_size;

                if (first_chunk) {
                        sg = first_chunk;
                        first_chunk = NULL;
                } else {
                        sg = alloc_fn(alloc_size, gfp_mask);
                }
                if (unlikely(!sg)) {
                        /*
                         * Adjust entry count to reflect that the last
                         * entry of the previous table won't be used for
                         * linkage. Without this, sg_kfree() may get
                         * confused.
                         */
                        if (prv)
                                table->nents = ++table->orig_nents;

                        return -ENOMEM;
                }

                sg_init_table(sg, alloc_size);
                table->nents = table->orig_nents += sg_size;

                /*
                 * If this is the first mapping, assign the sg table header.
                 * If this is not the first mapping, chain previous part.
                 */
                if (prv)
                        sg_chain(prv, prv_max_ents, sg);
                else
                        table->sgl = sg;

                /*
                 * If no more entries after this one, mark the end
                 */
                if (!left)
                        sg_mark_end(&sg[sg_size - 1]);

                prv = sg;
                prv_max_ents = curr_max_ents;
                curr_max_ents = max_ents;
        } while (left);

        return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
        int ret;

        ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
                               NULL, 0, gfp_mask, sg_kmalloc);
        if (unlikely(ret))
                __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);

        return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
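
/*
 * Usage sketch (illustrative only): a typical allocate/fill/free cycle.
 * "npages" and "pages" are hypothetical and error handling is abbreviated.
 *
 *      struct sg_table table;
 *      struct scatterlist *sg;
 *      int i, ret;
 *
 *      ret = sg_alloc_table(&table, npages, GFP_KERNEL);
 *      if (ret)
 *              return ret;
 *      for_each_sg(table.sgl, sg, table.nents, i)
 *              sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *      ...
 *      sg_free_table(&table);
 */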

/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *                               an array of pages
 * @sgt: The sg table header to use
 * @pages: Pointer to an array of page pointers
 * @n_pages: Number of pages in the pages array
 * @offset: Offset from start of the first page to the start of a buffer
 * @size: Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node up to the
 *   maximum size specified in @max_segment. A user may provide an offset at
 *   the start and a size of valid data in a buffer specified by the page
 *   array. The returned sg table is released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
                                unsigned int n_pages, unsigned int offset,
                                unsigned long size, unsigned int max_segment,
                                gfp_t gfp_mask)
{
        unsigned int chunks, cur_page, seg_len, i;
        int ret;
        struct scatterlist *s;

        if (WARN_ON(!max_segment || offset_in_page(max_segment)))
                return -EINVAL;

        /* compute number of contiguous chunks */
        chunks = 1;
        seg_len = 0;
        for (i = 1; i < n_pages; i++) {
                seg_len += PAGE_SIZE;
                if (seg_len >= max_segment ||
                    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
                        chunks++;
                        seg_len = 0;
                }
        }

        ret = sg_alloc_table(sgt, chunks, gfp_mask);
        if (unlikely(ret))
                return ret;

        /* merging chunks and putting them into the scatterlist */
        cur_page = 0;
        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
                unsigned int j, chunk_size;

                /* look for the end of the current chunk */
                seg_len = 0;
                for (j = cur_page + 1; j < n_pages; j++) {
                        seg_len += PAGE_SIZE;
                        if (seg_len >= max_segment ||
                            page_to_pfn(pages[j]) !=
                            page_to_pfn(pages[j - 1]) + 1)
                                break;
                }

                chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
                sg_set_page(s, pages[cur_page],
                            min_t(unsigned long, size, chunk_size), offset);
                size -= chunk_size;
                offset = 0;
                cur_page = j;
        }

        return 0;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *                             an array of pages
 * @sgt: The sg table header to use
 * @pages: Pointer to an array of page pointers
 * @n_pages: Number of pages in the pages array
 * @offset: Offset from start of the first page to the start of a buffer
 * @size: Number of valid bytes in the buffer (after offset)
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset at the start and a size of valid data in a buffer
 *   specified by the page array. The returned sg table is released by
 *   sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
                              unsigned int n_pages, unsigned int offset,
                              unsigned long size, gfp_t gfp_mask)
{
        return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
                                           SCATTERLIST_MAX_SEGMENT, gfp_mask);
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
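
/*
 * Usage sketch (illustrative only): building a table over pages pinned from
 * userspace, a common pattern in DRM and V4L2 drivers. "pages", "n_pages",
 * "off" and "len" are hypothetical.
 *
 *      struct sg_table sgt;
 *      int ret;
 *
 *      ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, off, len,
 *                                      GFP_KERNEL);
 *      if (ret)
 *              return ret;
 *      ...
 *      sg_free_table(&sgt);
 */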

#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *      for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
                                    unsigned int order, bool chainable,
                                    gfp_t gfp, unsigned int *nent_p)
{
        struct scatterlist *sgl, *sg;
        struct page *page;
        unsigned int nent, nalloc;
        u32 elem_len;

        nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
        /* Check for integer overflow */
        if (length > (nent << (PAGE_SHIFT + order)))
                return NULL;
        nalloc = nent;
        if (chainable) {
                /* Check for integer overflow */
                if (nalloc + 1 < nalloc)
                        return NULL;
                nalloc++;
        }
        sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
                            (gfp & ~GFP_DMA) | __GFP_ZERO);
        if (!sgl)
                return NULL;

        sg_init_table(sgl, nalloc);
        sg = sgl;
        while (length) {
                elem_len = min_t(u64, length, PAGE_SIZE << order);
                page = alloc_pages(gfp, order);
                if (!page) {
                        sgl_free(sgl);
                        return NULL;
                }

                sg_set_page(sg, page, elem_len, 0);
                length -= elem_len;
                sg = sg_next(sg);
        }
        WARN_ONCE(length, "length = %lld\n", length);
        if (nent_p)
                *nent_p = nent;
        return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
                              unsigned int *nent_p)
{
        return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);
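
/*
 * Usage sketch (illustrative only): backing a request buffer with freshly
 * allocated pages, as SCSI target code does. "len" is hypothetical.
 *
 *      unsigned int nents;
 *      struct scatterlist *sgl;
 *
 *      sgl = sgl_alloc(len, GFP_KERNEL, &nents);
 *      if (!sgl)
 *              return -ENOMEM;
 *      ...
 *      sgl_free(sgl);
 */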

/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid
 *   freeing a page twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
        struct scatterlist *sg;
        struct page *page;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                if (!sg)
                        break;
                page = sg_page(sg);
                if (page)
                        __free_pages(page, order);
        }
        kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
        sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
        sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
                          struct scatterlist *sglist, unsigned int nents,
                          unsigned long pgoffset)
{
        piter->__pg_advance = 0;
        piter->__nents = nents;

        piter->sg = sglist;
        piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
        return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
        if (!piter->__nents || !piter->sg)
                return false;

        piter->sg_pgoffset += piter->__pg_advance;
        piter->__pg_advance = 1;

        while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
                piter->sg_pgoffset -= sg_page_count(piter->sg);
                piter->sg = sg_next(piter->sg);
                if (!--piter->__nents || !piter->sg)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);

static int sg_dma_page_count(struct scatterlist *sg)
{
        return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
        struct sg_page_iter *piter = &dma_iter->base;

        if (!piter->__nents || !piter->sg)
                return false;

        piter->sg_pgoffset += piter->__pg_advance;
        piter->__pg_advance = 1;

        while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
                piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
                piter->sg = sg_next(piter->sg);
                if (!--piter->__nents || !piter->sg)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);
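
/*
 * Usage sketch (illustrative only): these iterators usually run behind the
 * for_each_sg_page() helper from <linux/scatterlist.h>, which visits every
 * page in a list regardless of how large each segment is. "sgt" is
 * hypothetical.
 *
 *      struct sg_page_iter piter;
 *
 *      for_each_sg_page(sgt->sgl, &piter, sgt->nents, 0)
 *              set_page_dirty(sg_page_iter_page(&piter));
 */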

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: SG_MITER_* operation flags (SG_MITER_TO_SG or SG_MITER_FROM_SG,
 *      optionally SG_MITER_ATOMIC)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
                    unsigned int nents, unsigned int flags)
{
        memset(miter, 0, sizeof(struct sg_mapping_iter));

        __sg_page_iter_start(&miter->piter, sgl, nents, 0);
        WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
        miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
        if (!miter->__remaining) {
                struct scatterlist *sg;
                unsigned long pgoffset;

                if (!__sg_page_iter_next(&miter->piter))
                        return false;

                sg = miter->piter.sg;
                pgoffset = miter->piter.sg_pgoffset;

                miter->__offset = pgoffset ? 0 : sg->offset;
                miter->__remaining = sg->offset + sg->length -
                                (pgoffset << PAGE_SHIFT) - miter->__offset;
                miter->__remaining = min_t(unsigned long, miter->__remaining,
                                           PAGE_SIZE - miter->__offset);
        }

        return true;
}
/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has already been advanced by
 *   sg_miter_next(), this stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped or has not been advanced yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
        sg_miter_stop(miter);

        while (offset) {
                off_t consumed;

                if (!sg_miter_get_next_page(miter))
                        return false;

                consumed = min_t(off_t, offset, miter->__remaining);
                miter->__offset += consumed;
                miter->__remaining -= consumed;
                offset -= consumed;
        }

        return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
        sg_miter_stop(miter);

        /*
         * Get to the next page if necessary.
         * __remaining, __offset is adjusted by sg_miter_stop
         */
        if (!sg_miter_get_next_page(miter))
                return false;

        miter->page = sg_page_iter_page(&miter->piter);
        miter->consumed = miter->length = miter->__remaining;

        if (miter->__flags & SG_MITER_ATOMIC)
                miter->addr = kmap_atomic(miter->page) + miter->__offset;
        else
                miter->addr = kmap(miter->page) + miter->__offset;

        return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
        WARN_ON(miter->consumed > miter->length);

        /* drop resources from the last iteration */
        if (miter->addr) {
                miter->__offset += miter->consumed;
                miter->__remaining -= miter->consumed;

                if ((miter->__flags & SG_MITER_TO_SG) &&
                    !PageSlab(miter->page))
                        flush_kernel_dcache_page(miter->page);

                if (miter->__flags & SG_MITER_ATOMIC) {
                        WARN_ON_ONCE(preemptible());
                        kunmap_atomic(miter->addr);
                } else
                        kunmap(miter->page);

                miter->page = NULL;
                miter->addr = NULL;
                miter->length = 0;
                miter->consumed = 0;
        }
}
EXPORT_SYMBOL(sg_miter_stop);
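
/*
 * Usage sketch (illustrative only): the canonical start/next/stop loop,
 * here checksumming an sg list without a bounce buffer. "sgl", "nents"
 * and the use of crc32() are hypothetical.
 *
 *      struct sg_mapping_iter miter;
 *      u32 csum = 0;
 *
 *      sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 *      while (sg_miter_next(&miter))
 *              csum = crc32(csum, miter.addr, miter.length);
 *      sg_miter_stop(&miter);
 */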

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: The linear buffer to copy to or from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 * @to_buffer: transfer direction (true == from an sg list to a
 *      buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
                      size_t buflen, off_t skip, bool to_buffer)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned int sg_flags = SG_MITER_ATOMIC;

        if (to_buffer)
                sg_flags |= SG_MITER_FROM_SG;
        else
                sg_flags |= SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        if (!sg_miter_skip(&miter, skip))
                return 0;

        while ((offset < buflen) && sg_miter_next(&miter)) {
                unsigned int len;

                len = min(miter.length, buflen - offset);

                if (to_buffer)
                        memcpy(buf + offset, miter.addr, len);
                else
                        memcpy(miter.addr, buf + offset, len);

                offset += len;
        }

        sg_miter_stop(&miter);

        return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                           const void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                         void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
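
/*
 * Usage sketch (illustrative only): draining a response that a device
 * scattered across an sg list into a contiguous buffer. "dev", "resp" and
 * "resp_len" are hypothetical; a short count means the list ran out first.
 *
 *      size_t copied;
 *
 *      copied = sg_copy_to_buffer(sgl, nents, resp, resp_len);
 *      if (copied < resp_len)
 *              dev_warn(dev, "short sg copy: %zu of %zu bytes\n",
 *                       copied, resp_len);
 */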

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                            const void *buf, size_t buflen, off_t skip)
{
        return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                          void *buf, size_t buflen, off_t skip)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);

/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buflen: The number of bytes to zero out
 * @skip: Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
                      size_t buflen, off_t skip)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        if (!sg_miter_skip(&miter, skip))
                return 0;

        while (offset < buflen && sg_miter_next(&miter)) {
                unsigned int len;

                len = min(miter.length, buflen - offset);
                memset(miter.addr, 0, len);

                offset += len;
        }

        sg_miter_stop(&miter);
        return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);