/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/pagevec.h
 *
 * In many places it is efficient to batch an operation up against multiple
 * pages.  A pagevec is a multipage container which is used for that.
 */
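
/*
 * Illustrative usage sketch (not part of the original header): a caller
 * typically fills a pagevec from a mapping and then releases the whole
 * batch in one go.  'mapping' and 'do_something' below are hypothetical,
 * caller-provided names.
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *	unsigned i;
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup(&pvec, mapping, &index)) {
 *		for (i = 0; i < pagevec_count(&pvec); i++)
 *			do_something(pvec.pages[i]);
 *		pagevec_release(&pvec);
 *	}
 */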

#ifndef _LINUX_PAGEVEC_H
#define _LINUX_PAGEVEC_H

#include <linux/xarray.h>

/* 15 pointers + header align the pagevec structure to a power of two */
#define PAGEVEC_SIZE	15

struct page;
struct address_space;

struct pagevec {
	unsigned char nr;
	bool percpu_pvec_drained;
	struct page *pages[PAGEVEC_SIZE];
};
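
/*
 * Sizing note (illustrative, not from the original header): on a 64-bit
 * build with 8-byte pointers, the fields above pack as 1 byte (nr) +
 * 1 byte (percpu_pvec_drained) + 6 bytes of padding + 15 * 8 = 120 bytes
 * of page pointers, giving sizeof(struct pagevec) == 128, the power of
 * two that the PAGEVEC_SIZE comment refers to.
 */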

void __pagevec_release(struct pagevec *pvec);
void __pagevec_lru_add(struct pagevec *pvec);
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_entries,
				pgoff_t *indices);
void pagevec_remove_exceptionals(struct pagevec *pvec);
unsigned pagevec_lookup_range(struct pagevec *pvec,
			      struct address_space *mapping,
			      pgoff_t *start, pgoff_t end);
static inline unsigned pagevec_lookup(struct pagevec *pvec,
				      struct address_space *mapping,
				      pgoff_t *start)
{
	return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1);
}

unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag);
unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag, unsigned max_pages);
static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, xa_mark_t tag)
{
	return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag);
}
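
/*
 * Illustrative sketch (not part of the original header): walking every
 * page in a mapping that carries a given xarray mark, e.g.
 * PAGECACHE_TAG_DIRTY from <linux/fs.h>, in batches of at most
 * PAGEVEC_SIZE pages.  'mapping' and 'process_page' are hypothetical,
 * caller-provided names.
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *	unsigned i, nr;
 *
 *	pagevec_init(&pvec);
 *	while ((nr = pagevec_lookup_tag(&pvec, mapping, &index,
 *					PAGECACHE_TAG_DIRTY))) {
 *		for (i = 0; i < nr; i++)
 *			process_page(pvec.pages[i]);
 *		pagevec_release(&pvec);
 *	}
 */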

static inline void pagevec_init(struct pagevec *pvec)
{
	pvec->nr = 0;
	pvec->percpu_pvec_drained = false;
}

static inline void pagevec_reinit(struct pagevec *pvec)
{
	pvec->nr = 0;
}

static inline unsigned pagevec_count(struct pagevec *pvec)
{
	return pvec->nr;
}

static inline unsigned pagevec_space(struct pagevec *pvec)
{
	return PAGEVEC_SIZE - pvec->nr;
}

/*
 * Add a page to a pagevec.  Returns the number of slots still available.
 */
static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page)
{
	pvec->pages[pvec->nr++] = page;
	return pagevec_space(pvec);
}
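
/*
 * Illustrative sketch (not part of the original header): since
 * pagevec_add() returns the number of free slots left, callers commonly
 * drain the batch as soon as it fills up.  'page' is a hypothetical,
 * caller-provided page the caller already holds a reference on.
 *
 *	if (!pagevec_add(&pvec, page))
 *		pagevec_release(&pvec);
 */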

static inline void pagevec_release(struct pagevec *pvec)
{
	if (pagevec_count(pvec))
		__pagevec_release(pvec);
}

#endif /* _LINUX_PAGEVEC_H */