Thomas Gleixner | 457c899 | 2019-05-19 13:08:55 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* |
| 3 | * linux/mm/swap.c |
| 4 | * |
| 5 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds |
| 6 | */ |
| 7 | |
| 8 | /* |
Simon Arlott | 183ff22 | 2007-10-20 01:27:18 +0200 | [diff] [blame] | 9 | * This file contains the default values for the operation of the |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10 | * Linux VM subsystem. Fine-tuning documentation can be found in |
Mauro Carvalho Chehab | 5704324 | 2019-04-22 16:48:00 -0300 | [diff] [blame] | 11 | * Documentation/admin-guide/sysctl/vm.rst. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12 | * Started 18.12.91 |
| 13 | * Swap aging added 23.2.95, Stephen Tweedie. |
| 14 | * Buffermem limits added 12.3.98, Rik van Riel. |
| 15 | */ |
| 16 | |
| 17 | #include <linux/mm.h> |
| 18 | #include <linux/sched.h> |
| 19 | #include <linux/kernel_stat.h> |
| 20 | #include <linux/swap.h> |
| 21 | #include <linux/mman.h> |
| 22 | #include <linux/pagemap.h> |
| 23 | #include <linux/pagevec.h> |
| 24 | #include <linux/init.h> |
Paul Gortmaker | b95f1b31 | 2011-10-16 02:01:52 -0400 | [diff] [blame] | 25 | #include <linux/export.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 26 | #include <linux/mm_inline.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 27 | #include <linux/percpu_counter.h> |
Dan Williams | 3565fce | 2016-01-15 16:56:55 -0800 | [diff] [blame] | 28 | #include <linux/memremap.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 29 | #include <linux/percpu.h> |
| 30 | #include <linux/cpu.h> |
| 31 | #include <linux/notifier.h> |
Peter Zijlstra | e0bf68d | 2007-10-16 23:25:46 -0700 | [diff] [blame] | 32 | #include <linux/backing-dev.h> |
Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 33 | #include <linux/memcontrol.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 34 | #include <linux/gfp.h> |
Kent Overstreet | a27bb33 | 2013-05-07 16:19:08 -0700 | [diff] [blame] | 35 | #include <linux/uio.h> |
Naoya Horiguchi | 822fc61 | 2015-04-15 16:14:35 -0700 | [diff] [blame] | 36 | #include <linux/hugetlb.h> |
Vladimir Davydov | 33c3fc7 | 2015-09-09 15:35:45 -0700 | [diff] [blame] | 37 | #include <linux/page_idle.h> |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 38 | #include <linux/local_lock.h> |
Minchan Kim | a0a0b3f | 2021-03-19 12:49:41 -0700 | [diff] [blame] | 39 | #include <linux/buffer_head.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 40 | |
Lee Schermerhorn | 64d6519 | 2008-10-18 20:26:52 -0700 | [diff] [blame] | 41 | #include "internal.h" |
| 42 | |
Mel Gorman | c6286c9 | 2013-07-03 15:02:26 -0700 | [diff] [blame] | 43 | #define CREATE_TRACE_POINTS |
| 44 | #include <trace/events/pagemap.h> |
| 45 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 46 | /* How many pages do we try to swap or page in/out together? */ |
| 47 | int page_cluster; |
| 48 | |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 49 | /* Protecting only lru_rotate.pvec which requires disabling interrupts */ |
| 50 | struct lru_rotate { |
| 51 | local_lock_t lock; |
| 52 | struct pagevec pvec; |
| 53 | }; |
| 54 | static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = { |
| 55 | .lock = INIT_LOCAL_LOCK(lock), |
| 56 | }; |
| 57 | |
| 58 | /* |
 | 59 | * The following struct pagevec instances are grouped together because they |
 | 60 | * are all protected by disabling preemption (interrupts remain enabled). |
| 61 | */ |
| 62 | struct lru_pvecs { |
| 63 | local_lock_t lock; |
| 64 | struct pagevec lru_add; |
| 65 | struct pagevec lru_deactivate_file; |
| 66 | struct pagevec lru_deactivate; |
| 67 | struct pagevec lru_lazyfree; |
Vinayak Menon | 9975da5 | 2021-03-18 16:49:18 +0530 | [diff] [blame] | 68 | struct pagevec lru_lazyfree_movetail; |
Ming Li | a4a921a | 2016-05-20 16:57:56 -0700 | [diff] [blame] | 69 | #ifdef CONFIG_SMP |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 70 | struct pagevec activate_page; |
Ming Li | a4a921a | 2016-05-20 16:57:56 -0700 | [diff] [blame] | 71 | #endif |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 72 | }; |
| 73 | static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = { |
| 74 | .lock = INIT_LOCAL_LOCK(lock), |
| 75 | }; |
Hisashi Hifumi | 902aaed | 2007-10-16 01:24:52 -0700 | [diff] [blame] | 76 | |
Adrian Bunk | b221385 | 2006-09-25 23:31:02 -0700 | [diff] [blame] | 77 | /* |
| 78 | * This path almost never happens for VM activity - pages are normally |
| 79 | * freed via pagevecs. But it gets used by networking. |
| 80 | */ |
Harvey Harrison | 920c7a5 | 2008-02-04 22:29:26 -0800 | [diff] [blame] | 81 | static void __page_cache_release(struct page *page) |
Adrian Bunk | b221385 | 2006-09-25 23:31:02 -0700 | [diff] [blame] | 82 | { |
| 83 | if (PageLRU(page)) { |
Andrey Ryabinin | f4b7e27 | 2019-03-05 15:49:39 -0800 | [diff] [blame] | 84 | pg_data_t *pgdat = page_pgdat(page); |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 85 | struct lruvec *lruvec; |
| 86 | unsigned long flags; |
Adrian Bunk | b221385 | 2006-09-25 23:31:02 -0700 | [diff] [blame] | 87 | |
Andrey Ryabinin | f4b7e27 | 2019-03-05 15:49:39 -0800 | [diff] [blame] | 88 | spin_lock_irqsave(&pgdat->lru_lock, flags); |
| 89 | lruvec = mem_cgroup_page_lruvec(page, pgdat); |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 90 | VM_BUG_ON_PAGE(!PageLRU(page), page); |
Adrian Bunk | b221385 | 2006-09-25 23:31:02 -0700 | [diff] [blame] | 91 | __ClearPageLRU(page); |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 92 | del_page_from_lru_list(page, lruvec, page_off_lru(page)); |
Andrey Ryabinin | f4b7e27 | 2019-03-05 15:49:39 -0800 | [diff] [blame] | 93 | spin_unlock_irqrestore(&pgdat->lru_lock, flags); |
Adrian Bunk | b221385 | 2006-09-25 23:31:02 -0700 | [diff] [blame] | 94 | } |
Nicholas Piggin | 6290602 | 2016-12-25 13:00:30 +1000 | [diff] [blame] | 95 | __ClearPageWaiters(page); |
Andrea Arcangeli | 9180706 | 2011-01-13 15:46:32 -0800 | [diff] [blame] | 96 | } |
| 97 | |
| 98 | static void __put_single_page(struct page *page) |
| 99 | { |
| 100 | __page_cache_release(page); |
Yang Shi | 7ae8853 | 2019-09-23 15:38:09 -0700 | [diff] [blame] | 101 | mem_cgroup_uncharge(page); |
Mel Gorman | 2d4894b | 2017-11-15 17:37:59 -0800 | [diff] [blame] | 102 | free_unref_page(page); |
Adrian Bunk | b221385 | 2006-09-25 23:31:02 -0700 | [diff] [blame] | 103 | } |
| 104 | |
Andrea Arcangeli | 9180706 | 2011-01-13 15:46:32 -0800 | [diff] [blame] | 105 | static void __put_compound_page(struct page *page) |
| 106 | { |
Naoya Horiguchi | 822fc61 | 2015-04-15 16:14:35 -0700 | [diff] [blame] | 107 | /* |
| 108 | * __page_cache_release() is supposed to be called for thp, not for |
 | 109 | * hugetlb. This is because a hugetlb page never has PageLRU set |
 | 110 | * (it is never put on any LRU list) and no memcg routines should |
 | 111 | * be called for hugetlb (it has a separate hugetlb_cgroup). |
| 112 | */ |
| 113 | if (!PageHuge(page)) |
| 114 | __page_cache_release(page); |
Matthew Wilcox (Oracle) | ff45fc3 | 2020-06-03 16:01:09 -0700 | [diff] [blame] | 115 | destroy_compound_page(page); |
Andrea Arcangeli | 9180706 | 2011-01-13 15:46:32 -0800 | [diff] [blame] | 116 | } |
| 117 | |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 118 | void __put_page(struct page *page) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 119 | { |
Dan Williams | 7138970 | 2017-04-28 10:23:37 -0700 | [diff] [blame] | 120 | if (is_zone_device_page(page)) { |
| 121 | put_dev_pagemap(page->pgmap); |
| 122 | |
| 123 | /* |
| 124 | * The page belongs to the device that created pgmap. Do |
| 125 | * not return it to page allocator. |
| 126 | */ |
| 127 | return; |
| 128 | } |
| 129 | |
Nick Piggin | 8519fb3 | 2006-02-07 12:58:52 -0800 | [diff] [blame] | 130 | if (unlikely(PageCompound(page))) |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 131 | __put_compound_page(page); |
| 132 | else |
Andrea Arcangeli | 9180706 | 2011-01-13 15:46:32 -0800 | [diff] [blame] | 133 | __put_single_page(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 134 | } |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 135 | EXPORT_SYMBOL(__put_page); |
Andrea Arcangeli | 70b50f9 | 2011-11-02 13:36:59 -0700 | [diff] [blame] | 136 | |
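/*
 * Illustrative sketch, not part of this file: the "networking" case mentioned
 * above. When another subsystem drops the final reference to a page that is
 * still on an LRU list (for example an skb fragment release calling
 * put_page()), the final put lands in __put_page() and
 * __page_cache_release() pulls the page off the LRU before it is freed.
 * The helper name below is hypothetical.
 */
static void __maybe_unused example_final_put(struct page *page)
{
	put_page(page);	/* may end up in __put_page() -> __page_cache_release() */
}
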
Alexander Zarochentsev | 1d7ea73 | 2006-08-13 23:24:27 -0700 | [diff] [blame] | 137 | /** |
Randy Dunlap | 7682486 | 2008-03-19 17:00:40 -0700 | [diff] [blame] | 138 | * put_pages_list() - release a list of pages |
| 139 | * @pages: list of pages threaded on page->lru |
Alexander Zarochentsev | 1d7ea73 | 2006-08-13 23:24:27 -0700 | [diff] [blame] | 140 | * |
 | 141 | * Release a list of pages which are strung together on page->lru. Currently |
| 142 | * used by read_cache_pages() and related error recovery code. |
Alexander Zarochentsev | 1d7ea73 | 2006-08-13 23:24:27 -0700 | [diff] [blame] | 143 | */ |
| 144 | void put_pages_list(struct list_head *pages) |
| 145 | { |
| 146 | while (!list_empty(pages)) { |
| 147 | struct page *victim; |
| 148 | |
Nikolay Borisov | f86196e | 2019-01-03 15:29:02 -0800 | [diff] [blame] | 149 | victim = lru_to_page(pages); |
Alexander Zarochentsev | 1d7ea73 | 2006-08-13 23:24:27 -0700 | [diff] [blame] | 150 | list_del(&victim->lru); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 151 | put_page(victim); |
Alexander Zarochentsev | 1d7ea73 | 2006-08-13 23:24:27 -0700 | [diff] [blame] | 152 | } |
| 153 | } |
| 154 | EXPORT_SYMBOL(put_pages_list); |
| 155 | |
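/*
 * Illustrative sketch, not part of this file: a caller that collected pages
 * on a private list threaded through page->lru (as the read_cache_pages()
 * error handling does) can drop its references in one call. The helper name
 * is hypothetical.
 */
static void __maybe_unused example_drop_page_list(void)
{
	LIST_HEAD(pages);
	struct page *page = alloc_page(GFP_KERNEL);

	if (page)
		list_add(&page->lru, &pages);

	/* put_pages_list() unlinks each page and drops one reference on it. */
	put_pages_list(&pages);
}
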
Mel Gorman | 18022c5 | 2012-07-31 16:44:51 -0700 | [diff] [blame] | 156 | /* |
| 157 | * get_kernel_pages() - pin kernel pages in memory |
| 158 | * @kiov: An array of struct kvec structures |
| 159 | * @nr_segs: number of segments to pin |
| 160 | * @write: pinning for read/write, currently ignored |
| 161 | * @pages: array that receives pointers to the pages pinned. |
| 162 | * Should be at least nr_segs long. |
| 163 | * |
 | 164 | * Returns the number of pages pinned. This may be fewer than the number |
 | 165 | * requested. If nr_segs is 0 or negative, returns 0. If no pages |
| 166 | * were pinned, returns -errno. Each page returned must be released |
| 167 | * with a put_page() call when it is finished with. |
| 168 | */ |
| 169 | int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write, |
| 170 | struct page **pages) |
| 171 | { |
| 172 | int seg; |
| 173 | |
| 174 | for (seg = 0; seg < nr_segs; seg++) { |
| 175 | if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE)) |
| 176 | return seg; |
| 177 | |
Mel Gorman | 5a17811 | 2012-07-31 16:45:02 -0700 | [diff] [blame] | 178 | pages[seg] = kmap_to_page(kiov[seg].iov_base); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 179 | get_page(pages[seg]); |
Mel Gorman | 18022c5 | 2012-07-31 16:44:51 -0700 | [diff] [blame] | 180 | } |
| 181 | |
| 182 | return seg; |
| 183 | } |
| 184 | EXPORT_SYMBOL_GPL(get_kernel_pages); |
| 185 | |
| 186 | /* |
| 187 | * get_kernel_page() - pin a kernel page in memory |
| 188 | * @start: starting kernel address |
| 189 | * @write: pinning for read/write, currently ignored |
| 190 | * @pages: array that receives pointer to the page pinned. |
 | 191 | * Must have room for at least one page pointer. |
| 192 | * |
| 193 | * Returns 1 if page is pinned. If the page was not pinned, returns |
| 194 | * -errno. The page returned must be released with a put_page() call |
| 195 | * when it is finished with. |
| 196 | */ |
| 197 | int get_kernel_page(unsigned long start, int write, struct page **pages) |
| 198 | { |
| 199 | const struct kvec kiov = { |
| 200 | .iov_base = (void *)start, |
| 201 | .iov_len = PAGE_SIZE |
| 202 | }; |
| 203 | |
| 204 | return get_kernel_pages(&kiov, 1, write, pages); |
| 205 | } |
| 206 | EXPORT_SYMBOL_GPL(get_kernel_page); |
| 207 | |
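/*
 * Illustrative sketch, not part of this file: pinning a single page-sized
 * kernel buffer through the kvec interface above. The buffer and helper name
 * are hypothetical; each iov_len must be exactly PAGE_SIZE or
 * get_kernel_pages() stops early.
 */
static int __maybe_unused example_pin_kernel_buf(void *buf, struct page **pagep)
{
	struct kvec kiov = {
		.iov_base = buf,
		.iov_len  = PAGE_SIZE,
	};

	/* Returns 1 on success; drop the pin later with put_page(*pagep). */
	return get_kernel_pages(&kiov, 1, 0, pagep);
}
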
Shaohua Li | 3dd7ae8 | 2011-03-22 16:33:45 -0700 | [diff] [blame] | 208 | static void pagevec_lru_move_fn(struct pagevec *pvec, |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 209 | void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg), |
| 210 | void *arg) |
Hisashi Hifumi | 902aaed | 2007-10-16 01:24:52 -0700 | [diff] [blame] | 211 | { |
| 212 | int i; |
Mel Gorman | 68eb073 | 2016-07-28 15:47:11 -0700 | [diff] [blame] | 213 | struct pglist_data *pgdat = NULL; |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 214 | struct lruvec *lruvec; |
Shaohua Li | 3dd7ae8 | 2011-03-22 16:33:45 -0700 | [diff] [blame] | 215 | unsigned long flags = 0; |
Hisashi Hifumi | 902aaed | 2007-10-16 01:24:52 -0700 | [diff] [blame] | 216 | |
| 217 | for (i = 0; i < pagevec_count(pvec); i++) { |
| 218 | struct page *page = pvec->pages[i]; |
Mel Gorman | 68eb073 | 2016-07-28 15:47:11 -0700 | [diff] [blame] | 219 | struct pglist_data *pagepgdat = page_pgdat(page); |
Hisashi Hifumi | 902aaed | 2007-10-16 01:24:52 -0700 | [diff] [blame] | 220 | |
Mel Gorman | 68eb073 | 2016-07-28 15:47:11 -0700 | [diff] [blame] | 221 | if (pagepgdat != pgdat) { |
| 222 | if (pgdat) |
| 223 | spin_unlock_irqrestore(&pgdat->lru_lock, flags); |
| 224 | pgdat = pagepgdat; |
| 225 | spin_lock_irqsave(&pgdat->lru_lock, flags); |
Hisashi Hifumi | 902aaed | 2007-10-16 01:24:52 -0700 | [diff] [blame] | 226 | } |
Shaohua Li | 3dd7ae8 | 2011-03-22 16:33:45 -0700 | [diff] [blame] | 227 | |
Mel Gorman | 68eb073 | 2016-07-28 15:47:11 -0700 | [diff] [blame] | 228 | lruvec = mem_cgroup_page_lruvec(page, pgdat); |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 229 | (*move_fn)(page, lruvec, arg); |
Hisashi Hifumi | 902aaed | 2007-10-16 01:24:52 -0700 | [diff] [blame] | 230 | } |
Mel Gorman | 68eb073 | 2016-07-28 15:47:11 -0700 | [diff] [blame] | 231 | if (pgdat) |
| 232 | spin_unlock_irqrestore(&pgdat->lru_lock, flags); |
Mel Gorman | c6f92f9 | 2017-11-15 17:37:55 -0800 | [diff] [blame] | 233 | release_pages(pvec->pages, pvec->nr); |
Linus Torvalds | 83896fb | 2011-01-17 14:42:34 -0800 | [diff] [blame] | 234 | pagevec_reinit(pvec); |
Shaohua Li | d8505de | 2011-01-13 15:47:33 -0800 | [diff] [blame] | 235 | } |
| 236 | |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 237 | static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec, |
| 238 | void *arg) |
Shaohua Li | 3dd7ae8 | 2011-03-22 16:33:45 -0700 | [diff] [blame] | 239 | { |
| 240 | int *pgmoved = arg; |
Shaohua Li | 3dd7ae8 | 2011-03-22 16:33:45 -0700 | [diff] [blame] | 241 | |
Johannes Weiner | c55e8d0 | 2017-02-24 14:56:23 -0800 | [diff] [blame] | 242 | if (PageLRU(page) && !PageUnevictable(page)) { |
| 243 | del_page_from_lru_list(page, lruvec, page_lru(page)); |
| 244 | ClearPageActive(page); |
| 245 | add_page_to_lru_list_tail(page, lruvec, page_lru(page)); |
Matthew Wilcox (Oracle) | 6c35784 | 2020-08-14 17:30:37 -0700 | [diff] [blame] | 246 | (*pgmoved) += thp_nr_pages(page); |
Shaohua Li | 3dd7ae8 | 2011-03-22 16:33:45 -0700 | [diff] [blame] | 247 | } |
| 248 | } |
| 249 | |
| 250 | /* |
| 251 | * pagevec_move_tail() must be called with IRQ disabled. |
| 252 | * Otherwise this may cause nasty races. |
| 253 | */ |
| 254 | static void pagevec_move_tail(struct pagevec *pvec) |
| 255 | { |
| 256 | int pgmoved = 0; |
| 257 | |
| 258 | pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved); |
| 259 | __count_vm_events(PGROTATED, pgmoved); |
| 260 | } |
| 261 | |
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 262 | /* return true if the pagevec needs to be drained */ |
| 263 | static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page) |
| 264 | { |
| 265 | bool ret = false; |
| 266 | |
| 267 | if (!pagevec_add(pvec, page) || PageCompound(page) || |
| 268 | lru_cache_disabled()) |
| 269 | ret = true; |
| 270 | |
| 271 | return ret; |
| 272 | } |
| 273 | |
Hisashi Hifumi | 902aaed | 2007-10-16 01:24:52 -0700 | [diff] [blame] | 274 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 275 | * Writeback is about to end against a page which has been marked for immediate |
| 276 | * reclaim. If it still appears to be reclaimable, move it to the tail of the |
Hisashi Hifumi | 902aaed | 2007-10-16 01:24:52 -0700 | [diff] [blame] | 277 | * inactive list. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 278 | */ |
Shaohua Li | 3dd7ae8 | 2011-03-22 16:33:45 -0700 | [diff] [blame] | 279 | void rotate_reclaimable_page(struct page *page) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 280 | { |
Johannes Weiner | c55e8d0 | 2017-02-24 14:56:23 -0800 | [diff] [blame] | 281 | if (!PageLocked(page) && !PageDirty(page) && |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 282 | !PageUnevictable(page) && PageLRU(page)) { |
Miklos Szeredi | ac6aadb | 2008-04-28 02:12:38 -0700 | [diff] [blame] | 283 | struct pagevec *pvec; |
| 284 | unsigned long flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 285 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 286 | get_page(page); |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 287 | local_lock_irqsave(&lru_rotate.lock, flags); |
| 288 | pvec = this_cpu_ptr(&lru_rotate.pvec); |
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 289 | if (pagevec_add_and_need_flush(pvec, page)) |
Miklos Szeredi | ac6aadb | 2008-04-28 02:12:38 -0700 | [diff] [blame] | 290 | pagevec_move_tail(pvec); |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 291 | local_unlock_irqrestore(&lru_rotate.lock, flags); |
Miklos Szeredi | ac6aadb | 2008-04-28 02:12:38 -0700 | [diff] [blame] | 292 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 293 | } |
| 294 | |
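/*
 * Illustrative sketch, not part of this file: a paraphrase of how writeback
 * completion (see end_page_writeback() in mm/filemap.c) feeds pages tagged
 * PG_reclaim back into rotate_reclaimable_page() so they are queued for the
 * tail of the inactive list. Not verbatim kernel code.
 */
static void __maybe_unused example_writeback_done(struct page *page)
{
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}
}
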
Johannes Weiner | 96f8bf4 | 2020-06-03 16:03:09 -0700 | [diff] [blame] | 295 | void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages) |
KOSAKI Motohiro | 3e2f41f | 2009-01-07 18:08:20 -0800 | [diff] [blame] | 296 | { |
Johannes Weiner | 7cf111b | 2020-06-03 16:03:06 -0700 | [diff] [blame] | 297 | do { |
| 298 | unsigned long lrusize; |
| 299 | |
| 300 | /* Record cost event */ |
Johannes Weiner | 96f8bf4 | 2020-06-03 16:03:09 -0700 | [diff] [blame] | 301 | if (file) |
| 302 | lruvec->file_cost += nr_pages; |
Johannes Weiner | 7cf111b | 2020-06-03 16:03:06 -0700 | [diff] [blame] | 303 | else |
Johannes Weiner | 96f8bf4 | 2020-06-03 16:03:09 -0700 | [diff] [blame] | 304 | lruvec->anon_cost += nr_pages; |
Johannes Weiner | 7cf111b | 2020-06-03 16:03:06 -0700 | [diff] [blame] | 305 | |
| 306 | /* |
| 307 | * Decay previous events |
| 308 | * |
| 309 | * Because workloads change over time (and to avoid |
| 310 | * overflow) we keep these statistics as a floating |
| 311 | * average, which ends up weighing recent refaults |
| 312 | * more than old ones. |
| 313 | */ |
| 314 | lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) + |
| 315 | lruvec_page_state(lruvec, NR_ACTIVE_ANON) + |
| 316 | lruvec_page_state(lruvec, NR_INACTIVE_FILE) + |
| 317 | lruvec_page_state(lruvec, NR_ACTIVE_FILE); |
| 318 | |
| 319 | if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) { |
| 320 | lruvec->file_cost /= 2; |
| 321 | lruvec->anon_cost /= 2; |
| 322 | } |
| 323 | } while ((lruvec = parent_lruvec(lruvec))); |
KOSAKI Motohiro | 3e2f41f | 2009-01-07 18:08:20 -0800 | [diff] [blame] | 324 | } |
| 325 | |
Johannes Weiner | 96f8bf4 | 2020-06-03 16:03:09 -0700 | [diff] [blame] | 326 | void lru_note_cost_page(struct page *page) |
| 327 | { |
| 328 | lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)), |
Matthew Wilcox (Oracle) | 6c35784 | 2020-08-14 17:30:37 -0700 | [diff] [blame] | 329 | page_is_file_lru(page), thp_nr_pages(page)); |
Johannes Weiner | 96f8bf4 | 2020-06-03 16:03:09 -0700 | [diff] [blame] | 330 | } |
| 331 | |
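/*
 * Illustrative sketch, not part of this file: the decay rule used by
 * lru_note_cost() in isolation. With lrusize = 4000 pages and accumulated
 * costs of 600 (file) + 500 (anon), the sum exceeds lrusize / 4 = 1000, so
 * both costs are halved; recent refaults therefore dominate the average.
 */
static void __maybe_unused example_cost_decay(unsigned long *file_cost,
					      unsigned long *anon_cost,
					      unsigned long lrusize)
{
	if (*file_cost + *anon_cost > lrusize / 4) {
		*file_cost /= 2;
		*anon_cost /= 2;
	}
}
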
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 332 | static void __activate_page(struct page *page, struct lruvec *lruvec, |
| 333 | void *arg) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 334 | { |
Linus Torvalds | 7a60857 | 2011-01-17 14:42:19 -0800 | [diff] [blame] | 335 | if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { |
Linus Torvalds | 7a60857 | 2011-01-17 14:42:19 -0800 | [diff] [blame] | 336 | int lru = page_lru_base_type(page); |
Matthew Wilcox (Oracle) | 6c35784 | 2020-08-14 17:30:37 -0700 | [diff] [blame] | 337 | int nr_pages = thp_nr_pages(page); |
Linus Torvalds | 7a60857 | 2011-01-17 14:42:19 -0800 | [diff] [blame] | 338 | |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 339 | del_page_from_lru_list(page, lruvec, lru); |
Linus Torvalds | 7a60857 | 2011-01-17 14:42:19 -0800 | [diff] [blame] | 340 | SetPageActive(page); |
| 341 | lru += LRU_ACTIVE; |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 342 | add_page_to_lru_list(page, lruvec, lru); |
Mel Gorman | 24b7e58 | 2014-08-06 16:07:11 -0700 | [diff] [blame] | 343 | trace_mm_lru_activate(page); |
Linus Torvalds | 7a60857 | 2011-01-17 14:42:19 -0800 | [diff] [blame] | 344 | |
Shakeel Butt | 21e330f | 2020-06-03 16:03:19 -0700 | [diff] [blame] | 345 | __count_vm_events(PGACTIVATE, nr_pages); |
| 346 | __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, |
| 347 | nr_pages); |
Linus Torvalds | 7a60857 | 2011-01-17 14:42:19 -0800 | [diff] [blame] | 348 | } |
Shaohua Li | eb709b0 | 2011-05-24 17:12:55 -0700 | [diff] [blame] | 349 | } |
| 350 | |
| 351 | #ifdef CONFIG_SMP |
Shaohua Li | eb709b0 | 2011-05-24 17:12:55 -0700 | [diff] [blame] | 352 | static void activate_page_drain(int cpu) |
| 353 | { |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 354 | struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu); |
Shaohua Li | eb709b0 | 2011-05-24 17:12:55 -0700 | [diff] [blame] | 355 | |
| 356 | if (pagevec_count(pvec)) |
| 357 | pagevec_lru_move_fn(pvec, __activate_page, NULL); |
| 358 | } |
| 359 | |
Chris Metcalf | 5fbc461 | 2013-09-12 15:13:55 -0700 | [diff] [blame] | 360 | static bool need_activate_page_drain(int cpu) |
| 361 | { |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 362 | return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0; |
Chris Metcalf | 5fbc461 | 2013-09-12 15:13:55 -0700 | [diff] [blame] | 363 | } |
| 364 | |
Yu Zhao | cc2828b | 2020-10-13 16:52:08 -0700 | [diff] [blame] | 365 | static void activate_page(struct page *page) |
Shaohua Li | eb709b0 | 2011-05-24 17:12:55 -0700 | [diff] [blame] | 366 | { |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 367 | page = compound_head(page); |
Shaohua Li | eb709b0 | 2011-05-24 17:12:55 -0700 | [diff] [blame] | 368 | if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 369 | struct pagevec *pvec; |
Shaohua Li | eb709b0 | 2011-05-24 17:12:55 -0700 | [diff] [blame] | 370 | |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 371 | local_lock(&lru_pvecs.lock); |
| 372 | pvec = this_cpu_ptr(&lru_pvecs.activate_page); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 373 | get_page(page); |
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 374 | if (pagevec_add_and_need_flush(pvec, page)) |
Shaohua Li | eb709b0 | 2011-05-24 17:12:55 -0700 | [diff] [blame] | 375 | pagevec_lru_move_fn(pvec, __activate_page, NULL); |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 376 | local_unlock(&lru_pvecs.lock); |
Shaohua Li | eb709b0 | 2011-05-24 17:12:55 -0700 | [diff] [blame] | 377 | } |
| 378 | } |
| 379 | |
| 380 | #else |
| 381 | static inline void activate_page_drain(int cpu) |
| 382 | { |
| 383 | } |
| 384 | |
Yu Zhao | cc2828b | 2020-10-13 16:52:08 -0700 | [diff] [blame] | 385 | static void activate_page(struct page *page) |
Shaohua Li | eb709b0 | 2011-05-24 17:12:55 -0700 | [diff] [blame] | 386 | { |
Andrey Ryabinin | f4b7e27 | 2019-03-05 15:49:39 -0800 | [diff] [blame] | 387 | pg_data_t *pgdat = page_pgdat(page); |
Shaohua Li | eb709b0 | 2011-05-24 17:12:55 -0700 | [diff] [blame] | 388 | |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 389 | page = compound_head(page); |
Andrey Ryabinin | f4b7e27 | 2019-03-05 15:49:39 -0800 | [diff] [blame] | 390 | spin_lock_irq(&pgdat->lru_lock); |
| 391 | __activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL); |
| 392 | spin_unlock_irq(&pgdat->lru_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 393 | } |
Shaohua Li | eb709b0 | 2011-05-24 17:12:55 -0700 | [diff] [blame] | 394 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 395 | |
Mel Gorman | 059285a | 2013-07-03 15:02:30 -0700 | [diff] [blame] | 396 | static void __lru_cache_activate_page(struct page *page) |
| 397 | { |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 398 | struct pagevec *pvec; |
Mel Gorman | 059285a | 2013-07-03 15:02:30 -0700 | [diff] [blame] | 399 | int i; |
| 400 | |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 401 | local_lock(&lru_pvecs.lock); |
| 402 | pvec = this_cpu_ptr(&lru_pvecs.lru_add); |
| 403 | |
Mel Gorman | 059285a | 2013-07-03 15:02:30 -0700 | [diff] [blame] | 404 | /* |
| 405 | * Search backwards on the optimistic assumption that the page being |
| 406 | * activated has just been added to this pagevec. Note that only |
| 407 | * the local pagevec is examined as a !PageLRU page could be in the |
| 408 | * process of being released, reclaimed, migrated or on a remote |
| 409 | * pagevec that is currently being drained. Furthermore, marking |
| 410 | * a remote pagevec's page PageActive potentially hits a race where |
| 411 | * a page is marked PageActive just after it is added to the inactive |
| 412 | * list causing accounting errors and BUG_ON checks to trigger. |
| 413 | */ |
| 414 | for (i = pagevec_count(pvec) - 1; i >= 0; i--) { |
| 415 | struct page *pagevec_page = pvec->pages[i]; |
| 416 | |
| 417 | if (pagevec_page == page) { |
| 418 | SetPageActive(page); |
| 419 | break; |
| 420 | } |
| 421 | } |
| 422 | |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 423 | local_unlock(&lru_pvecs.lock); |
Mel Gorman | 059285a | 2013-07-03 15:02:30 -0700 | [diff] [blame] | 424 | } |
| 425 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 426 | /* |
| 427 | * Mark a page as having seen activity. |
| 428 | * |
| 429 | * inactive,unreferenced -> inactive,referenced |
| 430 | * inactive,referenced -> active,unreferenced |
| 431 | * active,unreferenced -> active,referenced |
Hugh Dickins | eb39d61 | 2014-08-06 16:06:43 -0700 | [diff] [blame] | 432 | * |
 | 433 | * When a newly allocated page is not yet visible to others, and thus safe for non-atomic ops, |
| 434 | * __SetPageReferenced(page) may be substituted for mark_page_accessed(page). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 435 | */ |
Harvey Harrison | 920c7a5 | 2008-02-04 22:29:26 -0800 | [diff] [blame] | 436 | void mark_page_accessed(struct page *page) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 437 | { |
Kirill A. Shutemov | e90309c | 2016-01-15 16:54:33 -0800 | [diff] [blame] | 438 | page = compound_head(page); |
Mel Gorman | 059285a | 2013-07-03 15:02:30 -0700 | [diff] [blame] | 439 | |
Fengguang Wu | a1100a7 | 2019-11-30 17:50:00 -0800 | [diff] [blame] | 440 | if (!PageReferenced(page)) { |
| 441 | SetPageReferenced(page); |
| 442 | } else if (PageUnevictable(page)) { |
| 443 | /* |
| 444 | * Unevictable pages are on the "LRU_UNEVICTABLE" list. But, |
| 445 | * this list is never rotated or maintained, so marking an |
 | 446 | * unevictable page accessed has no effect. |
| 447 | */ |
| 448 | } else if (!PageActive(page)) { |
Mel Gorman | 059285a | 2013-07-03 15:02:30 -0700 | [diff] [blame] | 449 | /* |
| 450 | * If the page is on the LRU, queue it for activation via |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 451 | * lru_pvecs.activate_page. Otherwise, assume the page is on a |
Mel Gorman | 059285a | 2013-07-03 15:02:30 -0700 | [diff] [blame] | 452 | * pagevec, mark it active and it'll be moved to the active |
| 453 | * LRU on the next drain. |
| 454 | */ |
| 455 | if (PageLRU(page)) |
| 456 | activate_page(page); |
| 457 | else |
| 458 | __lru_cache_activate_page(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 459 | ClearPageReferenced(page); |
Joonsoo Kim | cb68688 | 2020-06-25 20:30:34 -0700 | [diff] [blame] | 460 | workingset_activation(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 461 | } |
Vladimir Davydov | 33c3fc7 | 2015-09-09 15:35:45 -0700 | [diff] [blame] | 462 | if (page_is_idle(page)) |
| 463 | clear_page_idle(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 464 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 465 | EXPORT_SYMBOL(mark_page_accessed); |
| 466 | |
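/*
 * Illustrative sketch, not part of this file: the transitions listed above in
 * practice. A page that starts out inactive and unreferenced needs two
 * accesses before it is promoted to the active list. The helper name is
 * hypothetical.
 */
static void __maybe_unused example_two_touches(struct page *page)
{
	mark_page_accessed(page);	/* inactive,unreferenced -> inactive,referenced */
	mark_page_accessed(page);	/* inactive,referenced   -> active,unreferenced */
}
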
KOSAKI Motohiro | f04e9eb | 2008-10-18 20:26:19 -0700 | [diff] [blame] | 467 | /** |
Mel Gorman | c53954a | 2013-07-03 15:02:34 -0700 | [diff] [blame] | 468 | * lru_cache_add - add a page to the LRU lists |
KOSAKI Motohiro | f04e9eb | 2008-10-18 20:26:19 -0700 | [diff] [blame] | 469 | * @page: the page to be added to the LRU. |
Jianyu Zhan | 2329d37 | 2014-06-04 16:07:31 -0700 | [diff] [blame] | 470 | * |
| 471 | * Queue the page for addition to the LRU via pagevec. The decision on whether |
| 472 | * to add the page to the [in]active [file|anon] list is deferred until the |
| 473 | * pagevec is drained. This gives a chance for the caller of lru_cache_add() |
 | 474 | * to have the page added to the active list using mark_page_accessed(). |
KOSAKI Motohiro | f04e9eb | 2008-10-18 20:26:19 -0700 | [diff] [blame] | 475 | */ |
Mel Gorman | c53954a | 2013-07-03 15:02:34 -0700 | [diff] [blame] | 476 | void lru_cache_add(struct page *page) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 477 | { |
Johannes Weiner | 6058eae | 2020-06-03 16:02:40 -0700 | [diff] [blame] | 478 | struct pagevec *pvec; |
| 479 | |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 480 | VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page); |
| 481 | VM_BUG_ON_PAGE(PageLRU(page), page); |
Johannes Weiner | 6058eae | 2020-06-03 16:02:40 -0700 | [diff] [blame] | 482 | |
| 483 | get_page(page); |
| 484 | local_lock(&lru_pvecs.lock); |
| 485 | pvec = this_cpu_ptr(&lru_pvecs.lru_add); |
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 486 | if (pagevec_add_and_need_flush(pvec, page)) |
Johannes Weiner | 6058eae | 2020-06-03 16:02:40 -0700 | [diff] [blame] | 487 | __pagevec_lru_add(pvec); |
| 488 | local_unlock(&lru_pvecs.lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 489 | } |
Johannes Weiner | 6058eae | 2020-06-03 16:02:40 -0700 | [diff] [blame] | 490 | EXPORT_SYMBOL(lru_cache_add); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 491 | |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 492 | /** |
Joonsoo Kim | b518154 | 2020-08-11 18:30:40 -0700 | [diff] [blame] | 493 | * lru_cache_add_inactive_or_unevictable |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 494 | * @page: the page to be added to LRU |
 | 495 | * @vma_flags: vm_flags of the vma in which @page is mapped, for determining reclaimability |
| 496 | * |
Joonsoo Kim | b518154 | 2020-08-11 18:30:40 -0700 | [diff] [blame] | 497 | * Place @page on the inactive or unevictable LRU list, depending on its |
Miaohe Lin | 12eab42 | 2020-10-13 16:52:24 -0700 | [diff] [blame] | 498 | * evictability. |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 499 | */ |
Laurent Dufour | cbff8f3 | 2018-04-17 16:33:20 +0200 | [diff] [blame] | 500 | void __lru_cache_add_inactive_or_unevictable(struct page *page, |
| 501 | unsigned long vma_flags) |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 502 | { |
Joonsoo Kim | b518154 | 2020-08-11 18:30:40 -0700 | [diff] [blame] | 503 | bool unevictable; |
| 504 | |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 505 | VM_BUG_ON_PAGE(PageLRU(page), page); |
| 506 | |
Laurent Dufour | cbff8f3 | 2018-04-17 16:33:20 +0200 | [diff] [blame] | 507 | unevictable = (vma_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED; |
Joonsoo Kim | b518154 | 2020-08-11 18:30:40 -0700 | [diff] [blame] | 508 | if (unlikely(unevictable) && !TestSetPageMlocked(page)) { |
Hugh Dickins | 0964730 | 2020-09-18 21:20:15 -0700 | [diff] [blame] | 509 | int nr_pages = thp_nr_pages(page); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 510 | /* |
 | 511 | * We use the irq-unsafe __mod_zone_page_state() because this |
 | 512 | * counter is not modified from interrupt context, and the pte |
 | 513 | * lock is held (a spinlock), which implies preemption is disabled. |
| 514 | */ |
Hugh Dickins | 0964730 | 2020-09-18 21:20:15 -0700 | [diff] [blame] | 515 | __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages); |
| 516 | count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 517 | } |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 518 | lru_cache_add(page); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 519 | } |
| 520 | |
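/*
 * Illustrative sketch, not part of this file: how a fault path might place a
 * freshly allocated anonymous page, passing the faulting vma's flags so the
 * page lands on either the inactive or the unevictable list. The helper name
 * is hypothetical.
 */
static void __maybe_unused example_fault_add(struct page *page,
					     struct vm_area_struct *vma)
{
	__lru_cache_add_inactive_or_unevictable(page, vma->vm_flags);
}
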
Hisashi Hifumi | 902aaed | 2007-10-16 01:24:52 -0700 | [diff] [blame] | 521 | /* |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 522 | * If the page can not be invalidated, it is moved to the |
| 523 | * inactive list to speed up its reclaim. It is moved to the |
| 524 | * head of the list, rather than the tail, to give the flusher |
| 525 | * threads some time to write it out, as this is much more |
| 526 | * effective than the single-page writeout from reclaim. |
Minchan Kim | 278df9f | 2011-03-22 16:32:54 -0700 | [diff] [blame] | 527 | * |
 | 528 | * If the page isn't page_mapped and is dirty or under writeback, it |
 | 529 | * can be reclaimed asap using PG_reclaim. |
| 530 | * |
| 531 | * 1. active, mapped page -> none |
| 532 | * 2. active, dirty/writeback page -> inactive, head, PG_reclaim |
| 533 | * 3. inactive, mapped page -> none |
| 534 | * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim |
| 535 | * 5. inactive, clean -> inactive, tail |
| 536 | * 6. Others -> none |
| 537 | * |
 | 538 | * In case 4 the page is moved to the head of the inactive list because |
 | 539 | * the VM expects the flusher threads to write it out, which is much more |
 | 540 | * effective than the single-page writeout from reclaim. |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 541 | */ |
Minchan Kim | cc5993b | 2015-04-15 16:13:26 -0700 | [diff] [blame] | 542 | static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec, |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 543 | void *arg) |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 544 | { |
Johannes Weiner | fbbb602 | 2020-06-03 16:02:57 -0700 | [diff] [blame] | 545 | int lru; |
Minchan Kim | 278df9f | 2011-03-22 16:32:54 -0700 | [diff] [blame] | 546 | bool active; |
Matthew Wilcox (Oracle) | 6c35784 | 2020-08-14 17:30:37 -0700 | [diff] [blame] | 547 | int nr_pages = thp_nr_pages(page); |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 548 | |
Minchan Kim | 278df9f | 2011-03-22 16:32:54 -0700 | [diff] [blame] | 549 | if (!PageLRU(page)) |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 550 | return; |
| 551 | |
Minchan Kim | bad49d9 | 2011-05-11 15:13:30 -0700 | [diff] [blame] | 552 | if (PageUnevictable(page)) |
| 553 | return; |
| 554 | |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 555 | /* Some processes are using the page */ |
| 556 | if (page_mapped(page)) |
| 557 | return; |
| 558 | |
Minchan Kim | 278df9f | 2011-03-22 16:32:54 -0700 | [diff] [blame] | 559 | active = PageActive(page); |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 560 | lru = page_lru_base_type(page); |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 561 | |
| 562 | del_page_from_lru_list(page, lruvec, lru + active); |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 563 | ClearPageActive(page); |
| 564 | ClearPageReferenced(page); |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 565 | |
Minchan Kim | 278df9f | 2011-03-22 16:32:54 -0700 | [diff] [blame] | 566 | if (PageWriteback(page) || PageDirty(page)) { |
| 567 | /* |
 | 568 | * Setting PG_reclaim here can race with end_page_writeback(), |
 | 569 | * which can confuse readahead. But the race window |
 | 570 | * is _really_ small and it is a non-critical problem. |
| 571 | */ |
Yu Zhao | e7a1aaf | 2019-09-23 15:34:33 -0700 | [diff] [blame] | 572 | add_page_to_lru_list(page, lruvec, lru); |
Minchan Kim | 278df9f | 2011-03-22 16:32:54 -0700 | [diff] [blame] | 573 | SetPageReclaim(page); |
| 574 | } else { |
| 575 | /* |
 | 576 | * The page's writeback ended while it was in the pagevec, |
 | 577 | * so move the page to the tail of the inactive list. |
| 578 | */ |
Yu Zhao | e7a1aaf | 2019-09-23 15:34:33 -0700 | [diff] [blame] | 579 | add_page_to_lru_list_tail(page, lruvec, lru); |
Shakeel Butt | 5d91f31 | 2020-06-03 16:03:16 -0700 | [diff] [blame] | 580 | __count_vm_events(PGROTATED, nr_pages); |
Minchan Kim | 278df9f | 2011-03-22 16:32:54 -0700 | [diff] [blame] | 581 | } |
| 582 | |
Shakeel Butt | 21e330f | 2020-06-03 16:03:19 -0700 | [diff] [blame] | 583 | if (active) { |
Shakeel Butt | 5d91f31 | 2020-06-03 16:03:16 -0700 | [diff] [blame] | 584 | __count_vm_events(PGDEACTIVATE, nr_pages); |
Shakeel Butt | 21e330f | 2020-06-03 16:03:19 -0700 | [diff] [blame] | 585 | __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, |
| 586 | nr_pages); |
| 587 | } |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 588 | } |
| 589 | |
Minchan Kim | 9c276cc | 2019-09-25 16:49:08 -0700 | [diff] [blame] | 590 | static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec, |
| 591 | void *arg) |
| 592 | { |
| 593 | if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { |
Minchan Kim | 9c276cc | 2019-09-25 16:49:08 -0700 | [diff] [blame] | 594 | int lru = page_lru_base_type(page); |
Matthew Wilcox (Oracle) | 6c35784 | 2020-08-14 17:30:37 -0700 | [diff] [blame] | 595 | int nr_pages = thp_nr_pages(page); |
Minchan Kim | 9c276cc | 2019-09-25 16:49:08 -0700 | [diff] [blame] | 596 | |
| 597 | del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE); |
| 598 | ClearPageActive(page); |
| 599 | ClearPageReferenced(page); |
| 600 | add_page_to_lru_list(page, lruvec, lru); |
| 601 | |
Shakeel Butt | 21e330f | 2020-06-03 16:03:19 -0700 | [diff] [blame] | 602 | __count_vm_events(PGDEACTIVATE, nr_pages); |
| 603 | __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, |
| 604 | nr_pages); |
Minchan Kim | 9c276cc | 2019-09-25 16:49:08 -0700 | [diff] [blame] | 605 | } |
| 606 | } |
Minchan Kim | 10853a0 | 2016-01-15 16:55:11 -0800 | [diff] [blame] | 607 | |
Shaohua Li | f7ad2a6 | 2017-05-03 14:52:29 -0700 | [diff] [blame] | 608 | static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec, |
Minchan Kim | 10853a0 | 2016-01-15 16:55:11 -0800 | [diff] [blame] | 609 | void *arg) |
| 610 | { |
Shaohua Li | f7ad2a6 | 2017-05-03 14:52:29 -0700 | [diff] [blame] | 611 | if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && |
Shaohua Li | 24c92eb | 2017-10-03 16:15:29 -0700 | [diff] [blame] | 612 | !PageSwapCache(page) && !PageUnevictable(page)) { |
Shaohua Li | f7ad2a6 | 2017-05-03 14:52:29 -0700 | [diff] [blame] | 613 | bool active = PageActive(page); |
Matthew Wilcox (Oracle) | 6c35784 | 2020-08-14 17:30:37 -0700 | [diff] [blame] | 614 | int nr_pages = thp_nr_pages(page); |
Minchan Kim | 10853a0 | 2016-01-15 16:55:11 -0800 | [diff] [blame] | 615 | |
Shaohua Li | f7ad2a6 | 2017-05-03 14:52:29 -0700 | [diff] [blame] | 616 | del_page_from_lru_list(page, lruvec, |
| 617 | LRU_INACTIVE_ANON + active); |
Minchan Kim | 10853a0 | 2016-01-15 16:55:11 -0800 | [diff] [blame] | 618 | ClearPageActive(page); |
| 619 | ClearPageReferenced(page); |
Shaohua Li | f7ad2a6 | 2017-05-03 14:52:29 -0700 | [diff] [blame] | 620 | /* |
Huang Ying | 9de4f22 | 2020-04-06 20:04:41 -0700 | [diff] [blame] | 621 | * Lazyfree pages are clean anonymous pages. They have the |
 | 622 | * PG_swapbacked flag cleared, to distinguish them from normal |
 | 623 | * anonymous pages. |
Shaohua Li | f7ad2a6 | 2017-05-03 14:52:29 -0700 | [diff] [blame] | 624 | */ |
| 625 | ClearPageSwapBacked(page); |
| 626 | add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE); |
Minchan Kim | 10853a0 | 2016-01-15 16:55:11 -0800 | [diff] [blame] | 627 | |
Shakeel Butt | 21e330f | 2020-06-03 16:03:19 -0700 | [diff] [blame] | 628 | __count_vm_events(PGLAZYFREE, nr_pages); |
| 629 | __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, |
| 630 | nr_pages); |
Minchan Kim | 10853a0 | 2016-01-15 16:55:11 -0800 | [diff] [blame] | 631 | } |
| 632 | } |
| 633 | |
Vinayak Menon | 9975da5 | 2021-03-18 16:49:18 +0530 | [diff] [blame] | 634 | static void lru_lazyfree_movetail_fn(struct page *page, struct lruvec *lruvec, |
| 635 | void *arg) |
| 636 | { |
Charan Teja Reddy | 8011eb2 | 2021-06-25 12:30:50 +0530 | [diff] [blame] | 637 | bool *add_to_tail = (bool *)arg; |
| 638 | |
Vinayak Menon | 9975da5 | 2021-03-18 16:49:18 +0530 | [diff] [blame] | 639 | if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) && |
| 640 | !PageSwapCache(page)) { |
| 641 | bool active = PageActive(page); |
| 642 | |
| 643 | del_page_from_lru_list(page, lruvec, |
| 644 | LRU_INACTIVE_ANON + active); |
| 645 | ClearPageActive(page); |
| 646 | ClearPageReferenced(page); |
Charan Teja Reddy | 8011eb2 | 2021-06-25 12:30:50 +0530 | [diff] [blame] | 647 | if (add_to_tail && *add_to_tail) |
| 648 | add_page_to_lru_list_tail(page, lruvec, LRU_INACTIVE_FILE); |
| 649 | else |
| 650 | add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE); |
Vinayak Menon | 9975da5 | 2021-03-18 16:49:18 +0530 | [diff] [blame] | 651 | } |
| 652 | } |
| 653 | |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 654 | /* |
Hisashi Hifumi | 902aaed | 2007-10-16 01:24:52 -0700 | [diff] [blame] | 655 | * Drain pages out of the cpu's pagevecs. |
| 656 | * Either "cpu" is the current CPU, and preemption has already been |
| 657 | * disabled; or "cpu" is being hot-unplugged, and is already dead. |
| 658 | */ |
Konstantin Khlebnikov | f0cb3c7 | 2012-03-21 16:34:06 -0700 | [diff] [blame] | 659 | void lru_add_drain_cpu(int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 660 | { |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 661 | struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 662 | |
Mel Gorman | 13f7f78 | 2013-07-03 15:02:28 -0700 | [diff] [blame] | 663 | if (pagevec_count(pvec)) |
Mel Gorman | a0b8cab3 | 2013-07-03 15:02:32 -0700 | [diff] [blame] | 664 | __pagevec_lru_add(pvec); |
Hisashi Hifumi | 902aaed | 2007-10-16 01:24:52 -0700 | [diff] [blame] | 665 | |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 666 | pvec = &per_cpu(lru_rotate.pvec, cpu); |
Qian Cai | 7e0cc01 | 2020-08-14 17:31:50 -0700 | [diff] [blame] | 667 | /* Disabling interrupts below acts as a compiler barrier. */ |
| 668 | if (data_race(pagevec_count(pvec))) { |
Hisashi Hifumi | 902aaed | 2007-10-16 01:24:52 -0700 | [diff] [blame] | 669 | unsigned long flags; |
| 670 | |
| 671 | /* No harm done if a racing interrupt already did this */ |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 672 | local_lock_irqsave(&lru_rotate.lock, flags); |
Hisashi Hifumi | 902aaed | 2007-10-16 01:24:52 -0700 | [diff] [blame] | 673 | pagevec_move_tail(pvec); |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 674 | local_unlock_irqrestore(&lru_rotate.lock, flags); |
Hisashi Hifumi | 902aaed | 2007-10-16 01:24:52 -0700 | [diff] [blame] | 675 | } |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 676 | |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 677 | pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu); |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 678 | if (pagevec_count(pvec)) |
Minchan Kim | cc5993b | 2015-04-15 16:13:26 -0700 | [diff] [blame] | 679 | pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); |
Shaohua Li | eb709b0 | 2011-05-24 17:12:55 -0700 | [diff] [blame] | 680 | |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 681 | pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu); |
Minchan Kim | 9c276cc | 2019-09-25 16:49:08 -0700 | [diff] [blame] | 682 | if (pagevec_count(pvec)) |
| 683 | pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); |
| 684 | |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 685 | pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu); |
Minchan Kim | 10853a0 | 2016-01-15 16:55:11 -0800 | [diff] [blame] | 686 | if (pagevec_count(pvec)) |
Shaohua Li | f7ad2a6 | 2017-05-03 14:52:29 -0700 | [diff] [blame] | 687 | pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL); |
Minchan Kim | 10853a0 | 2016-01-15 16:55:11 -0800 | [diff] [blame] | 688 | |
Vinayak Menon | 9975da5 | 2021-03-18 16:49:18 +0530 | [diff] [blame] | 689 | pvec = &per_cpu(lru_pvecs.lru_lazyfree_movetail, cpu); |
| 690 | if (pagevec_count(pvec)) |
| 691 | pagevec_lru_move_fn(pvec, lru_lazyfree_movetail_fn, NULL); |
| 692 | |
Shaohua Li | eb709b0 | 2011-05-24 17:12:55 -0700 | [diff] [blame] | 693 | activate_page_drain(cpu); |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 694 | } |
| 695 | |
| 696 | /** |
Minchan Kim | cc5993b | 2015-04-15 16:13:26 -0700 | [diff] [blame] | 697 | * deactivate_file_page - forcefully deactivate a file page |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 698 | * @page: page to deactivate |
| 699 | * |
 | 700 | * This function hints to the VM that @page is a good reclaim candidate, |
| 701 | * for example if its invalidation fails due to the page being dirty |
| 702 | * or under writeback. |
| 703 | */ |
Minchan Kim | cc5993b | 2015-04-15 16:13:26 -0700 | [diff] [blame] | 704 | void deactivate_file_page(struct page *page) |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 705 | { |
Minchan Kim | 821ed6b | 2011-05-24 17:12:31 -0700 | [diff] [blame] | 706 | /* |
Minchan Kim | cc5993b | 2015-04-15 16:13:26 -0700 | [diff] [blame] | 707 | * In a workload with many unevictable pages (such as one using mprotect heavily), |
 | 708 | * deactivating unevictable pages to accelerate reclaim is pointless. |
Minchan Kim | 821ed6b | 2011-05-24 17:12:31 -0700 | [diff] [blame] | 709 | */ |
| 710 | if (PageUnevictable(page)) |
| 711 | return; |
| 712 | |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 713 | if (likely(get_page_unless_zero(page))) { |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 714 | struct pagevec *pvec; |
| 715 | |
| 716 | local_lock(&lru_pvecs.lock); |
| 717 | pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file); |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 718 | |
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 719 | if (pagevec_add_and_need_flush(pvec, page)) |
Minchan Kim | cc5993b | 2015-04-15 16:13:26 -0700 | [diff] [blame] | 720 | pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 721 | local_unlock(&lru_pvecs.lock); |
Minchan Kim | 3156018 | 2011-03-22 16:32:52 -0700 | [diff] [blame] | 722 | } |
Andrew Morton | 80bfed9 | 2006-01-06 00:11:14 -0800 | [diff] [blame] | 723 | } |
| 724 | |
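/*
 * Illustrative sketch, not part of this file: a paraphrase of how cache
 * invalidation (see invalidate_mapping_pages() in mm/truncate.c) falls back
 * to deactivate_file_page() when a page cannot be dropped right away, e.g.
 * because it is dirty or under writeback. Not verbatim kernel code.
 */
static void __maybe_unused example_invalidate_fallback(struct page *page)
{
	if (PageDirty(page) || PageWriteback(page))
		deactivate_file_page(page);	/* hint: good reclaim candidate */
}
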
Minchan Kim | 9c276cc | 2019-09-25 16:49:08 -0700 | [diff] [blame] | 725 | /* |
| 726 | * deactivate_page - deactivate a page |
| 727 | * @page: page to deactivate |
| 728 | * |
| 729 | * deactivate_page() moves @page to the inactive list if @page was on the active |
| 730 | * list and was not an unevictable page. This is done to accelerate the reclaim |
| 731 | * of @page. |
| 732 | */ |
| 733 | void deactivate_page(struct page *page) |
| 734 | { |
| 735 | if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 736 | struct pagevec *pvec; |
Minchan Kim | 9c276cc | 2019-09-25 16:49:08 -0700 | [diff] [blame] | 737 | |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 738 | local_lock(&lru_pvecs.lock); |
| 739 | pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate); |
Minchan Kim | 9c276cc | 2019-09-25 16:49:08 -0700 | [diff] [blame] | 740 | get_page(page); |
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 741 | if (pagevec_add_and_need_flush(pvec, page)) |
Minchan Kim | 9c276cc | 2019-09-25 16:49:08 -0700 | [diff] [blame] | 742 | pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 743 | local_unlock(&lru_pvecs.lock); |
Minchan Kim | 9c276cc | 2019-09-25 16:49:08 -0700 | [diff] [blame] | 744 | } |
| 745 | } |
| 746 | |
Minchan Kim | 10853a0 | 2016-01-15 16:55:11 -0800 | [diff] [blame] | 747 | /** |
Shaohua Li | f7ad2a6 | 2017-05-03 14:52:29 -0700 | [diff] [blame] | 748 | * mark_page_lazyfree - make an anon page lazyfree |
Minchan Kim | 10853a0 | 2016-01-15 16:55:11 -0800 | [diff] [blame] | 749 | * @page: page to mark lazyfree |
| 750 | * |
Shaohua Li | f7ad2a6 | 2017-05-03 14:52:29 -0700 | [diff] [blame] | 751 | * mark_page_lazyfree() moves @page to the inactive file list. |
| 752 | * This is done to accelerate the reclaim of @page. |
Minchan Kim | 10853a0 | 2016-01-15 16:55:11 -0800 | [diff] [blame] | 753 | */ |
Shaohua Li | f7ad2a6 | 2017-05-03 14:52:29 -0700 | [diff] [blame] | 754 | void mark_page_lazyfree(struct page *page) |
Minchan Kim | 10853a0 | 2016-01-15 16:55:11 -0800 | [diff] [blame] | 755 | { |
Shaohua Li | f7ad2a6 | 2017-05-03 14:52:29 -0700 | [diff] [blame] | 756 | if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && |
Shaohua Li | 24c92eb | 2017-10-03 16:15:29 -0700 | [diff] [blame] | 757 | !PageSwapCache(page) && !PageUnevictable(page)) { |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 758 | struct pagevec *pvec; |
Minchan Kim | 10853a0 | 2016-01-15 16:55:11 -0800 | [diff] [blame] | 759 | |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 760 | local_lock(&lru_pvecs.lock); |
| 761 | pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 762 | get_page(page); |
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 763 | if (pagevec_add_and_need_flush(pvec, page)) |
Shaohua Li | f7ad2a6 | 2017-05-03 14:52:29 -0700 | [diff] [blame] | 764 | pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL); |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 765 | local_unlock(&lru_pvecs.lock); |
Minchan Kim | 10853a0 | 2016-01-15 16:55:11 -0800 | [diff] [blame] | 766 | } |
| 767 | } |
| 768 | |
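/*
 * Illustrative sketch, not part of this file: a paraphrase of the MADV_FREE
 * path (see mm/madvise.c), which marks clean anonymous pages lazyfree so
 * reclaim can discard them without swap I/O. Not verbatim kernel code.
 */
static void __maybe_unused example_madv_free(struct page *page)
{
	if (PageAnon(page) && !PageDirty(page) && !PageSwapCache(page))
		mark_page_lazyfree(page);
}
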
Vinayak Menon | 9975da5 | 2021-03-18 16:49:18 +0530 | [diff] [blame] | 769 | /** |
| 770 | * mark_page_lazyfree_movetail - make a swapbacked page lazyfree |
 | 771 | * @page: page to mark lazyfree |
| 772 | * |
 | 773 | * mark_page_lazyfree_movetail() moves @page to the inactive file list, at its tail when @tail is true. |
| 774 | * This is done to accelerate the reclaim of @page. |
| 775 | */ |
Charan Teja Reddy | 8011eb2 | 2021-06-25 12:30:50 +0530 | [diff] [blame] | 776 | void mark_page_lazyfree_movetail(struct page *page, bool tail) |
Vinayak Menon | 9975da5 | 2021-03-18 16:49:18 +0530 | [diff] [blame] | 777 | { |
| 778 | if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) && |
| 779 | !PageSwapCache(page)) { |
| 780 | struct pagevec *pvec; |
| 781 | |
| 782 | local_lock(&lru_pvecs.lock); |
| 783 | pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree_movetail); |
| 784 | get_page(page); |
| 785 | if (pagevec_add_and_need_flush(pvec, page)) |
| 786 | pagevec_lru_move_fn(pvec, |
Charan Teja Reddy | 8011eb2 | 2021-06-25 12:30:50 +0530 | [diff] [blame] | 787 | lru_lazyfree_movetail_fn, &tail); |
Vinayak Menon | 9975da5 | 2021-03-18 16:49:18 +0530 | [diff] [blame] | 788 | local_unlock(&lru_pvecs.lock); |
| 789 | } |
| 790 | } |
| 791 | |
Andrew Morton | 80bfed9 | 2006-01-06 00:11:14 -0800 | [diff] [blame] | 792 | void lru_add_drain(void) |
| 793 | { |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 794 | local_lock(&lru_pvecs.lock); |
| 795 | lru_add_drain_cpu(smp_processor_id()); |
| 796 | local_unlock(&lru_pvecs.lock); |
| 797 | } |
| 798 | |
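/*
 * Illustrative sketch, not part of this file: a path that is about to walk
 * the LRU lists (reclaim, migration, mlock, ...) first drains this CPU's
 * pagevecs so recently added pages become visible on the lists. The helper
 * name is hypothetical; lru_add_drain_all() would do the same for every CPU.
 */
static void __maybe_unused example_flush_local_pagevecs(void)
{
	lru_add_drain();
}
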
Minchan Kim | a9ac6ae | 2021-09-24 18:19:40 -0700 | [diff] [blame] | 799 | /* |
 | 800 | * In the SMP case this is called from per-cpu workqueue context, so |
 | 801 | * lru_add_drain_cpu() and invalidate_bh_lrus_cpu() run on |
 | 802 | * the same cpu. It isn't a problem in the !SMP case either, since |
 | 803 | * there is only one core and the locks disable preemption. |
| 804 | */ |
| 805 | static void lru_add_and_bh_lrus_drain(void) |
| 806 | { |
| 807 | local_lock(&lru_pvecs.lock); |
| 808 | lru_add_drain_cpu(smp_processor_id()); |
| 809 | local_unlock(&lru_pvecs.lock); |
| 810 | invalidate_bh_lrus_cpu(); |
| 811 | } |
| 812 | |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 813 | void lru_add_drain_cpu_zone(struct zone *zone) |
| 814 | { |
| 815 | local_lock(&lru_pvecs.lock); |
| 816 | lru_add_drain_cpu(smp_processor_id()); |
| 817 | drain_local_pages(zone); |
| 818 | local_unlock(&lru_pvecs.lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 819 | } |
| 820 | |
Michal Hocko | 6ea183d | 2019-02-20 22:19:54 -0800 | [diff] [blame] | 821 | #ifdef CONFIG_SMP |
| 822 | |
| 823 | static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); |
| 824 | |
David Howells | c402895 | 2006-11-22 14:57:56 +0000 | [diff] [blame] | 825 | static void lru_add_drain_per_cpu(struct work_struct *dummy) |
Nick Piggin | 053837f | 2006-01-18 17:42:27 -0800 | [diff] [blame] | 826 | { |
Minchan Kim | a9ac6ae | 2021-09-24 18:19:40 -0700 | [diff] [blame] | 827 | lru_add_and_bh_lrus_drain(); |
Nick Piggin | 053837f | 2006-01-18 17:42:27 -0800 | [diff] [blame] | 828 | } |
| 829 | |
Michal Hocko | 9852a72 | 2018-01-31 16:16:19 -0800 | [diff] [blame] | 830 | /* |
| 831 | * Doesn't need any cpu hotplug locking because we do rely on per-cpu |
| 832 | * kworkers being shut down before our page_alloc_cpu_dead callback is |
| 833 | * executed on the offlined cpu. |
| 834 | * Calling this function with cpu hotplug locks held can actually lead |
| 835 | * to obscure indirect dependencies via WQ context. |
| 836 | */ |
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 837 | inline void __lru_add_drain_all(bool force_all_cpus) |
Nick Piggin | 053837f | 2006-01-18 17:42:27 -0800 | [diff] [blame] | 838 | { |
Ahmed S. Darwish | 6446a51 | 2020-08-27 13:40:38 +0200 | [diff] [blame] | 839 | /* |
| 840 | * lru_drain_gen - Global pages generation number |
| 841 | * |
| 842 | * (A) Definition: global lru_drain_gen = x implies that all generations |
| 843 | * 0 < n <= x are already *scheduled* for draining. |
| 844 | * |
| 845 | * This is an optimization for the highly-contended use case where a |
 | 846 | * user space workload constantly generates a flow of pages for |
| 847 | * each CPU. |
| 848 | */ |
| 849 | static unsigned int lru_drain_gen; |
Chris Metcalf | 5fbc461 | 2013-09-12 15:13:55 -0700 | [diff] [blame] | 850 | static struct cpumask has_work; |
Ahmed S. Darwish | 6446a51 | 2020-08-27 13:40:38 +0200 | [diff] [blame] | 851 | static DEFINE_MUTEX(lock); |
| 852 | unsigned cpu, this_gen; |
Chris Metcalf | 5fbc461 | 2013-09-12 15:13:55 -0700 | [diff] [blame] | 853 | |
Michal Hocko | ce61287 | 2017-04-07 16:05:05 -0700 | [diff] [blame] | 854 | /* |
| 855 | * Make sure nobody triggers this path before mm_percpu_wq is fully |
| 856 | * initialized. |
| 857 | */ |
| 858 | if (WARN_ON(!mm_percpu_wq)) |
| 859 | return; |
| 860 | |
Ahmed S. Darwish | 6446a51 | 2020-08-27 13:40:38 +0200 | [diff] [blame] | 861 | /* |
| 862 |  * Guarantee that this CPU's pagevec counter stores are visible to
| 863 |  * other CPUs before it loads the current drain generation.
| 864 | */ |
| 865 | smp_mb(); |
| 866 | |
| 867 | /* |
| 868 | * (B) Locally cache global LRU draining generation number |
| 869 | * |
| 870 | * The read barrier ensures that the counter is loaded before the mutex |
| 871 | * is taken. It pairs with smp_mb() inside the mutex critical section |
| 872 | * at (D). |
| 873 | */ |
| 874 | this_gen = smp_load_acquire(&lru_drain_gen); |
Konstantin Khlebnikov | eef1a42 | 2019-11-30 17:50:40 -0800 | [diff] [blame] | 875 | |
Chris Metcalf | 5fbc461 | 2013-09-12 15:13:55 -0700 | [diff] [blame] | 876 | mutex_lock(&lock); |
Konstantin Khlebnikov | eef1a42 | 2019-11-30 17:50:40 -0800 | [diff] [blame] | 877 | |
| 878 | /* |
Ahmed S. Darwish | 6446a51 | 2020-08-27 13:40:38 +0200 | [diff] [blame] | 879 | * (C) Exit the draining operation if a newer generation, from another |
| 880 | * lru_add_drain_all(), was already scheduled for draining. Check (A). |
Konstantin Khlebnikov | eef1a42 | 2019-11-30 17:50:40 -0800 | [diff] [blame] | 881 | */ |
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 882 | if (unlikely(this_gen != lru_drain_gen && !force_all_cpus)) |
Konstantin Khlebnikov | eef1a42 | 2019-11-30 17:50:40 -0800 | [diff] [blame] | 883 | goto done; |
| 884 | |
Ahmed S. Darwish | 6446a51 | 2020-08-27 13:40:38 +0200 | [diff] [blame] | 885 | /* |
| 886 | * (D) Increment global generation number |
| 887 | * |
| 888 | * Pairs with smp_load_acquire() at (B), outside of the critical |
| 889 | * section. Use a full memory barrier to guarantee that the new global |
| 890 | * drain generation number is stored before loading pagevec counters. |
| 891 | * |
| 892 | * This pairing must be done here, before the for_each_online_cpu loop |
| 893 | * below which drains the page vectors. |
| 894 | * |
| 895 | * Let x, y, and z represent some system CPU numbers, where x < y < z. |
| 896 |  * Assume CPU #z is in the middle of the for_each_online_cpu loop
| 897 | * below and has already reached CPU #y's per-cpu data. CPU #x comes |
| 898 | * along, adds some pages to its per-cpu vectors, then calls |
| 899 | * lru_add_drain_all(). |
| 900 | * |
| 901 | * If the paired barrier is done at any later step, e.g. after the |
| 902 | * loop, CPU #x will just exit at (C) and miss flushing out all of its |
| 903 | * added pages. |
| 904 | */ |
| 905 | WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1); |
| 906 | smp_mb(); |
Konstantin Khlebnikov | eef1a42 | 2019-11-30 17:50:40 -0800 | [diff] [blame] | 907 | |
Chris Metcalf | 5fbc461 | 2013-09-12 15:13:55 -0700 | [diff] [blame] | 908 | cpumask_clear(&has_work); |
Chris Metcalf | 5fbc461 | 2013-09-12 15:13:55 -0700 | [diff] [blame] | 909 | for_each_online_cpu(cpu) { |
| 910 | struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); |
| 911 | |
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 912 | if (force_all_cpus || |
| 913 | pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) || |
Qian Cai | 7e0cc01 | 2020-08-14 17:31:50 -0700 | [diff] [blame] | 914 | data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) || |
Ingo Molnar | b01b214 | 2020-05-27 22:11:15 +0200 | [diff] [blame] | 915 | pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) || |
| 916 | pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) || |
| 917 | pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) || |
Vinayak Menon | 9975da5 | 2021-03-18 16:49:18 +0530 | [diff] [blame] | 918 | pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree_movetail, cpu)) || |
Minchan Kim | a0a0b3f | 2021-03-19 12:49:41 -0700 | [diff] [blame] | 919 | need_activate_page_drain(cpu) || |
| 920 | has_bh_in_lru(cpu, NULL)) { |
Chris Metcalf | 5fbc461 | 2013-09-12 15:13:55 -0700 | [diff] [blame] | 921 | INIT_WORK(work, lru_add_drain_per_cpu); |
Michal Hocko | ce61287 | 2017-04-07 16:05:05 -0700 | [diff] [blame] | 922 | queue_work_on(cpu, mm_percpu_wq, work); |
Ahmed S. Darwish | 6446a51 | 2020-08-27 13:40:38 +0200 | [diff] [blame] | 923 | __cpumask_set_cpu(cpu, &has_work); |
Chris Metcalf | 5fbc461 | 2013-09-12 15:13:55 -0700 | [diff] [blame] | 924 | } |
| 925 | } |
| 926 | |
| 927 | for_each_cpu(cpu, &has_work) |
| 928 | flush_work(&per_cpu(lru_add_drain_work, cpu)); |
| 929 | |
Konstantin Khlebnikov | eef1a42 | 2019-11-30 17:50:40 -0800 | [diff] [blame] | 930 | done: |
Chris Metcalf | 5fbc461 | 2013-09-12 15:13:55 -0700 | [diff] [blame] | 931 | mutex_unlock(&lock); |
Nick Piggin | 053837f | 2006-01-18 17:42:27 -0800 | [diff] [blame] | 932 | } |
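A stripped-down sketch of the generation protocol above, using assumed standalone names so only steps (B), (C) and (D) remain visible; the real code interleaves these with the per-cpu pagevec scan:

static unsigned int example_gen;                /* stand-in for lru_drain_gen */
static DEFINE_MUTEX(example_gen_lock);

static bool example_should_drain(void)
{
        /* (B) load the generation before taking the mutex */
        unsigned int this_gen = smp_load_acquire(&example_gen);
        bool drain;

        mutex_lock(&example_gen_lock);
        /* (C) a newer generation means draining was already scheduled */
        drain = (this_gen == example_gen);
        if (drain) {
                /* (D) publish the new generation before scanning pagevecs */
                WRITE_ONCE(example_gen, example_gen + 1);
                smp_mb();
        }
        mutex_unlock(&example_gen_lock);
        return drain;
}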
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 933 | |
| 934 | void lru_add_drain_all(void) |
| 935 | { |
| 936 | __lru_add_drain_all(false); |
| 937 | } |
Michal Hocko | 6ea183d | 2019-02-20 22:19:54 -0800 | [diff] [blame] | 938 | #else |
| 939 | void lru_add_drain_all(void) |
| 940 | { |
| 941 | lru_add_drain(); |
| 942 | } |
Ahmed S. Darwish | 6446a51 | 2020-08-27 13:40:38 +0200 | [diff] [blame] | 943 | #endif /* CONFIG_SMP */ |
Nick Piggin | 053837f | 2006-01-18 17:42:27 -0800 | [diff] [blame] | 944 | |
Minchan Kim | c8578a3 | 2021-07-13 12:24:14 -0700 | [diff] [blame] | 945 | static atomic_t lru_disable_count = ATOMIC_INIT(0); |
| 946 | |
| 947 | bool lru_cache_disabled(void) |
| 948 | { |
| 949 | return atomic_read(&lru_disable_count) != 0; |
| 950 | } |
| 951 | |
| 952 | void lru_cache_enable(void) |
| 953 | { |
| 954 | atomic_dec(&lru_disable_count); |
| 955 | } |
| 956 | EXPORT_SYMBOL_GPL(lru_cache_enable); |
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 957 | |
| 958 | /* |
| 959 | * lru_cache_disable() needs to be called before we start compiling |
| 960 | * a list of pages to be migrated using isolate_lru_page(). |
| 961 |  * It drains the pages on the LRU cache and then disables it on all
| 962 |  * cpus until lru_cache_enable() is called.
| 963 | * |
| 964 | * Must be paired with a call to lru_cache_enable(). |
| 965 | */ |
| 966 | void lru_cache_disable(void) |
| 967 | { |
Minchan Kim | c8578a3 | 2021-07-13 12:24:14 -0700 | [diff] [blame] | 968 | /* |
| 969 |  * If someone has already disabled the lru cache, just return after
| 970 |  * increasing lru_disable_count.
| 971 | */ |
| 972 | if (atomic_inc_not_zero(&lru_disable_count)) |
| 973 | return; |
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 974 | #ifdef CONFIG_SMP |
| 975 | /* |
| 976 |  * lru_add_drain_all() in force mode will schedule draining on
| 977 |  * all online CPUs, so any caller of lru_cache_disabled() wrapped by
| 978 |  * local_lock or running with preemption disabled is ordered by that.
| 979 |  * The atomic operation doesn't need to have stronger ordering
| 980 |  * requirements because that is enforced by the scheduling
| 981 |  * guarantees.
| 982 | */ |
| 983 | __lru_add_drain_all(true); |
| 984 | #else |
Minchan Kim | a9ac6ae | 2021-09-24 18:19:40 -0700 | [diff] [blame] | 985 | lru_add_and_bh_lrus_drain(); |
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 986 | #endif |
Minchan Kim | c8578a3 | 2021-07-13 12:24:14 -0700 | [diff] [blame] | 987 | atomic_inc(&lru_disable_count); |
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 988 | } |
Minchan Kim | c8578a3 | 2021-07-13 12:24:14 -0700 | [diff] [blame] | 989 | EXPORT_SYMBOL_GPL(lru_cache_disable); |
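A hedged usage sketch of the pairing documented above; lru_cache_disable(), lru_cache_enable() and isolate_lru_page() are the real APIs, the wrapper itself is illustrative:

static int example_isolate_for_migration(struct page *page,
                                         struct list_head *pagelist)
{
        int ret;

        lru_cache_disable();            /* drain now, keep pagevecs disabled */
        ret = isolate_lru_page(page);   /* page must already sit on an LRU list */
        if (!ret)
                list_add_tail(&page->lru, pagelist);
        lru_cache_enable();             /* re-enable per-cpu batching */
        return ret;
}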
Minchan Kim | 68a4731 | 2021-03-19 12:39:51 -0700 | [diff] [blame] | 990 | |
Michal Hocko | aabfb57 | 2014-10-09 15:28:52 -0700 | [diff] [blame] | 991 | /** |
Kirill A. Shutemov | ea1754a | 2016-04-01 15:29:48 +0300 | [diff] [blame] | 992 | * release_pages - batched put_page() |
Michal Hocko | aabfb57 | 2014-10-09 15:28:52 -0700 | [diff] [blame] | 993 | * @pages: array of pages to release |
| 994 | * @nr: number of pages |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 995 | * |
Michal Hocko | aabfb57 | 2014-10-09 15:28:52 -0700 | [diff] [blame] | 996 |  * Decrement the reference count on all the pages in @pages. If the
| 997 |  * count on a page falls to zero, remove that page from the LRU and free it.
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 998 | */ |
Mel Gorman | c6f92f9 | 2017-11-15 17:37:55 -0800 | [diff] [blame] | 999 | void release_pages(struct page **pages, int nr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1000 | { |
| 1001 | int i; |
Konstantin Khlebnikov | cc59850 | 2012-01-10 15:07:04 -0800 | [diff] [blame] | 1002 | LIST_HEAD(pages_to_free); |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 1003 | struct pglist_data *locked_pgdat = NULL; |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 1004 | struct lruvec *lruvec; |
Kees Cook | 3f649ab | 2020-06-03 13:09:38 -0700 | [diff] [blame] | 1005 | unsigned long flags; |
| 1006 | unsigned int lock_batch; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1007 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1008 | for (i = 0; i < nr; i++) { |
| 1009 | struct page *page = pages[i]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1010 | |
Michal Hocko | aabfb57 | 2014-10-09 15:28:52 -0700 | [diff] [blame] | 1011 | /* |
| 1012 | * Make sure the IRQ-safe lock-holding time does not get |
| 1013 | * excessive with a continuous string of pages from the |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 1014 | * same pgdat. The lock is held only if pgdat != NULL. |
Michal Hocko | aabfb57 | 2014-10-09 15:28:52 -0700 | [diff] [blame] | 1015 | */ |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 1016 | if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) { |
| 1017 | spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags); |
| 1018 | locked_pgdat = NULL; |
Michal Hocko | aabfb57 | 2014-10-09 15:28:52 -0700 | [diff] [blame] | 1019 | } |
| 1020 | |
Ralph Campbell | a9b576f | 2020-10-13 16:56:00 -0700 | [diff] [blame] | 1021 | page = compound_head(page); |
Aaron Lu | 6fcb52a | 2016-10-07 17:00:08 -0700 | [diff] [blame] | 1022 | if (is_huge_zero_page(page)) |
Kirill A. Shutemov | aa88b68 | 2016-04-28 16:18:27 -0700 | [diff] [blame] | 1023 | continue; |
Kirill A. Shutemov | aa88b68 | 2016-04-28 16:18:27 -0700 | [diff] [blame] | 1024 | |
Ira Weiny | c5d6c45 | 2019-06-05 14:49:22 -0700 | [diff] [blame] | 1025 | if (is_zone_device_page(page)) { |
Jérôme Glisse | df6ad69 | 2017-09-08 16:12:24 -0700 | [diff] [blame] | 1026 | if (locked_pgdat) { |
| 1027 | spin_unlock_irqrestore(&locked_pgdat->lru_lock, |
| 1028 | flags); |
| 1029 | locked_pgdat = NULL; |
| 1030 | } |
Ira Weiny | c5d6c45 | 2019-06-05 14:49:22 -0700 | [diff] [blame] | 1031 | /* |
| 1032 | * ZONE_DEVICE pages that return 'false' from |
Miaohe Lin | a3e7bea | 2020-10-13 16:52:15 -0700 | [diff] [blame] | 1033 | * page_is_devmap_managed() do not require special |
Ira Weiny | c5d6c45 | 2019-06-05 14:49:22 -0700 | [diff] [blame] | 1034 | * processing, and instead, expect a call to |
| 1035 | * put_page_testzero(). |
| 1036 | */ |
John Hubbard | 07d8026 | 2020-01-30 22:12:28 -0800 | [diff] [blame] | 1037 | if (page_is_devmap_managed(page)) { |
| 1038 | put_devmap_managed_page(page); |
Ira Weiny | c5d6c45 | 2019-06-05 14:49:22 -0700 | [diff] [blame] | 1039 | continue; |
John Hubbard | 07d8026 | 2020-01-30 22:12:28 -0800 | [diff] [blame] | 1040 | } |
Jérôme Glisse | df6ad69 | 2017-09-08 16:12:24 -0700 | [diff] [blame] | 1041 | } |
| 1042 | |
Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 1043 | if (!put_page_testzero(page)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1044 | continue; |
| 1045 | |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 1046 | if (PageCompound(page)) { |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 1047 | if (locked_pgdat) { |
| 1048 | spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags); |
| 1049 | locked_pgdat = NULL; |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 1050 | } |
| 1051 | __put_compound_page(page); |
| 1052 | continue; |
| 1053 | } |
| 1054 | |
Nick Piggin | 46453a6 | 2006-03-22 00:07:58 -0800 | [diff] [blame] | 1055 | if (PageLRU(page)) { |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 1056 | struct pglist_data *pgdat = page_pgdat(page); |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 1057 | |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 1058 | if (pgdat != locked_pgdat) { |
| 1059 | if (locked_pgdat) |
| 1060 | spin_unlock_irqrestore(&locked_pgdat->lru_lock, |
Hisashi Hifumi | 902aaed | 2007-10-16 01:24:52 -0700 | [diff] [blame] | 1061 | flags); |
Michal Hocko | aabfb57 | 2014-10-09 15:28:52 -0700 | [diff] [blame] | 1062 | lock_batch = 0; |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 1063 | locked_pgdat = pgdat; |
| 1064 | spin_lock_irqsave(&locked_pgdat->lru_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1065 | } |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 1066 | |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 1067 | lruvec = mem_cgroup_page_lruvec(page, locked_pgdat); |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 1068 | VM_BUG_ON_PAGE(!PageLRU(page), page); |
Nick Piggin | 6745391 | 2006-03-22 00:08:00 -0800 | [diff] [blame] | 1069 | __ClearPageLRU(page); |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 1070 | del_page_from_lru_list(page, lruvec, page_off_lru(page)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1071 | } |
Nick Piggin | 46453a6 | 2006-03-22 00:07:58 -0800 | [diff] [blame] | 1072 | |
Nicholas Piggin | 6290602 | 2016-12-25 13:00:30 +1000 | [diff] [blame] | 1073 | __ClearPageWaiters(page); |
Mel Gorman | c53954a | 2013-07-03 15:02:34 -0700 | [diff] [blame] | 1074 | |
Konstantin Khlebnikov | cc59850 | 2012-01-10 15:07:04 -0800 | [diff] [blame] | 1075 | list_add(&page->lru, &pages_to_free); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1076 | } |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 1077 | if (locked_pgdat) |
| 1078 | spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1079 | |
Johannes Weiner | 747db95 | 2014-08-08 14:19:24 -0700 | [diff] [blame] | 1080 | mem_cgroup_uncharge_list(&pages_to_free); |
Mel Gorman | 2d4894b | 2017-11-15 17:37:59 -0800 | [diff] [blame] | 1081 | free_unref_page_list(&pages_to_free); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1082 | } |
Miklos Szeredi | 0be8557 | 2010-10-27 15:34:46 -0700 | [diff] [blame] | 1083 | EXPORT_SYMBOL(release_pages); |
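An illustrative caller (the threshold and the function name are assumptions): batching the drops through release_pages() takes the IRQ-safe LRU lock at most once per run of pages from the same pgdat, rather than once per page:

static void example_put_pages(struct page **pages, int nr)
{
        int i;

        if (nr < 8) {
                /* tiny batches: individual puts are fine */
                for (i = 0; i < nr; i++)
                        put_page(pages[i]);
                return;
        }
        /* larger batches: one call, lock taken per pgdat run only */
        release_pages(pages, nr);
}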
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1084 | |
| 1085 | /* |
| 1086 | * The pages which we're about to release may be in the deferred lru-addition |
| 1087 | * queues. That would prevent them from really being freed right now. That's |
| 1088 | * OK from a correctness point of view but is inefficient - those pages may be |
| 1089 | * cache-warm and we want to give them back to the page allocator ASAP. |
| 1090 | * |
| 1091 | * So __pagevec_release() will drain those queues here. __pagevec_lru_add() |
| 1092 | * and __pagevec_lru_add_active() call release_pages() directly to avoid |
| 1093 | * mutual recursion. |
| 1094 | */ |
| 1095 | void __pagevec_release(struct pagevec *pvec) |
| 1096 | { |
Mel Gorman | 7f0b5fb | 2017-11-15 17:38:10 -0800 | [diff] [blame] | 1097 | if (!pvec->percpu_pvec_drained) { |
Mel Gorman | d9ed0d0 | 2017-11-15 17:37:48 -0800 | [diff] [blame] | 1098 | lru_add_drain(); |
Mel Gorman | 7f0b5fb | 2017-11-15 17:38:10 -0800 | [diff] [blame] | 1099 | pvec->percpu_pvec_drained = true; |
Mel Gorman | d9ed0d0 | 2017-11-15 17:37:48 -0800 | [diff] [blame] | 1100 | } |
Mel Gorman | c6f92f9 | 2017-11-15 17:37:55 -0800 | [diff] [blame] | 1101 | release_pages(pvec->pages, pagevec_count(pvec)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1102 | pagevec_reinit(pvec); |
| 1103 | } |
Steve French | 7f28570 | 2005-11-01 10:22:55 -0800 | [diff] [blame] | 1104 | EXPORT_SYMBOL(__pagevec_release); |
| 1105 | |
Hugh Dickins | 12d2710 | 2012-01-12 17:19:52 -0800 | [diff] [blame] | 1106 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1107 | /* used by __split_huge_page_refcount() */ |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 1108 | void lru_add_page_tail(struct page *page, struct page *page_tail, |
Shaohua Li | 5bc7b8a | 2013-04-29 15:08:36 -0700 | [diff] [blame] | 1109 | struct lruvec *lruvec, struct list_head *list) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1110 | { |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 1111 | VM_BUG_ON_PAGE(!PageHead(page), page); |
| 1112 | VM_BUG_ON_PAGE(PageCompound(page_tail), page); |
| 1113 | VM_BUG_ON_PAGE(PageLRU(page_tail), page); |
Lance Roy | 35f3aa3 | 2018-10-04 23:45:47 -0700 | [diff] [blame] | 1114 | lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1115 | |
Shaohua Li | 5bc7b8a | 2013-04-29 15:08:36 -0700 | [diff] [blame] | 1116 | if (!list) |
| 1117 | SetPageLRU(page_tail); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1118 | |
Hugh Dickins | 12d2710 | 2012-01-12 17:19:52 -0800 | [diff] [blame] | 1119 | if (likely(PageLRU(page))) |
| 1120 | list_add_tail(&page_tail->lru, &page->lru); |
Shaohua Li | 5bc7b8a | 2013-04-29 15:08:36 -0700 | [diff] [blame] | 1121 | else if (list) { |
| 1122 | /* page reclaim is reclaiming a huge page */ |
| 1123 | get_page(page_tail); |
| 1124 | list_add_tail(&page_tail->lru, list); |
| 1125 | } else { |
Hugh Dickins | 12d2710 | 2012-01-12 17:19:52 -0800 | [diff] [blame] | 1126 | /* |
| 1127 | * Head page has not yet been counted, as an hpage, |
| 1128 | * so we must account for each subpage individually. |
| 1129 | * |
Yu Zhao | e7a1aaf | 2019-09-23 15:34:33 -0700 | [diff] [blame] | 1130 |  * Put page_tail on the list at the correct position
| 1131 |  * so the tail pages all end up in order.
Hugh Dickins | 12d2710 | 2012-01-12 17:19:52 -0800 | [diff] [blame] | 1132 | */ |
Yu Zhao | e7a1aaf | 2019-09-23 15:34:33 -0700 | [diff] [blame] | 1133 | add_page_to_lru_list_tail(page_tail, lruvec, |
| 1134 | page_lru(page_tail)); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1135 | } |
| 1136 | } |
Hugh Dickins | 12d2710 | 2012-01-12 17:19:52 -0800 | [diff] [blame] | 1137 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1138 | |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 1139 | static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, |
| 1140 | void *arg) |
Shaohua Li | 3dd7ae8 | 2011-03-22 16:33:45 -0700 | [diff] [blame] | 1141 | { |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1142 | enum lru_list lru; |
| 1143 | int was_unevictable = TestClearPageUnevictable(page); |
Matthew Wilcox (Oracle) | 6c35784 | 2020-08-14 17:30:37 -0700 | [diff] [blame] | 1144 | int nr_pages = thp_nr_pages(page); |
Shaohua Li | 3dd7ae8 | 2011-03-22 16:33:45 -0700 | [diff] [blame] | 1145 | |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 1146 | VM_BUG_ON_PAGE(PageLRU(page), page); |
Shaohua Li | 3dd7ae8 | 2011-03-22 16:33:45 -0700 | [diff] [blame] | 1147 | |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1148 | /* |
| 1149 | * Page becomes evictable in two ways: |
Peng Fan | dae966d | 2019-05-13 17:19:26 -0700 | [diff] [blame] | 1150 | * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()]. |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1151 | * 2) Before acquiring LRU lock to put the page to correct LRU and then |
| 1152 | * a) do PageLRU check with lock [check_move_unevictable_pages] |
| 1153 | * b) do PageLRU check before lock [clear_page_mlock] |
| 1154 | * |
| 1155 | * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need |
| 1156 | * following strict ordering: |
| 1157 | * |
| 1158 | * #0: __pagevec_lru_add_fn #1: clear_page_mlock |
| 1159 | * |
| 1160 | * SetPageLRU() TestClearPageMlocked() |
| 1161 | * smp_mb() // explicit ordering // above provides strict |
| 1162 | * // ordering |
| 1163 | * PageMlocked() PageLRU() |
| 1164 | * |
| 1165 | * |
| 1166 |  * If '#1' does not observe the setting of PG_lru by '#0' and fails
| 1167 |  * isolation, the explicit barrier makes sure that the page_evictable
| 1168 |  * check will put the page on the correct LRU. Without smp_mb(), SetPageLRU
| 1169 |  * can be reordered after the PageMlocked check and can make '#1' fail
| 1170 |  * the isolation of the page whose Mlocked bit is cleared (#0 is also
| 1171 |  * looking at the same page), and the evictable page will be stranded
| 1172 |  * on an unevictable LRU.
| 1173 | */ |
Yang Shi | 9a9b6cc | 2020-04-01 21:06:23 -0700 | [diff] [blame] | 1174 | SetPageLRU(page); |
| 1175 | smp_mb__after_atomic(); |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1176 | |
| 1177 | if (page_evictable(page)) { |
| 1178 | lru = page_lru(page); |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1179 | if (was_unevictable) |
Shakeel Butt | 5d91f31 | 2020-06-03 16:03:16 -0700 | [diff] [blame] | 1180 | __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages); |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1181 | } else { |
| 1182 | lru = LRU_UNEVICTABLE; |
| 1183 | ClearPageActive(page); |
| 1184 | SetPageUnevictable(page); |
| 1185 | if (!was_unevictable) |
Shakeel Butt | 5d91f31 | 2020-06-03 16:03:16 -0700 | [diff] [blame] | 1186 | __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages); |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1187 | } |
| 1188 | |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 1189 | add_page_to_lru_list(page, lruvec, lru); |
Mel Gorman | 24b7e58 | 2014-08-06 16:07:11 -0700 | [diff] [blame] | 1190 | trace_mm_lru_insertion(page, lru); |
Shaohua Li | 3dd7ae8 | 2011-03-22 16:33:45 -0700 | [diff] [blame] | 1191 | } |
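A sketch of the '#1' side of the diagram above (the function name is assumed; the real counterpart is clear_page_mlock()-style code): the atomic RMW in TestClearPageMlocked() provides the ordering on that side, pairing with the SetPageLRU() plus smp_mb__after_atomic() in __pagevec_lru_add_fn():

static void example_observer_side(struct page *page)
{
        if (TestClearPageMlocked(page)) {
                /*
                 * The test-and-clear above is a full barrier, so the
                 * PageLRU() load below cannot be hoisted before it.
                 */
                if (PageLRU(page))
                        ; /* isolate and move the page to an evictable LRU */
        }
}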
| 1192 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1193 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1194 | * Add the passed pages to the LRU, then drop the caller's refcount |
| 1195 | * on them. Reinitialises the caller's pagevec. |
| 1196 | */ |
Mel Gorman | a0b8cab3 | 2013-07-03 15:02:32 -0700 | [diff] [blame] | 1197 | void __pagevec_lru_add(struct pagevec *pvec) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1198 | { |
Mel Gorman | a0b8cab3 | 2013-07-03 15:02:32 -0700 | [diff] [blame] | 1199 | pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1200 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1201 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1202 | /** |
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1203 | * pagevec_lookup_entries - gang pagecache lookup |
| 1204 | * @pvec: Where the resulting entries are placed |
| 1205 | * @mapping: The address_space to search |
| 1206 | * @start: The starting entry index |
Mike Rapoport | cb6f0f3 | 2018-02-21 14:45:50 -0800 | [diff] [blame] | 1207 |  * @nr_entries: The maximum number of entries
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1208 | * @indices: The cache indices corresponding to the entries in @pvec |
| 1209 | * |
| 1210 | * pagevec_lookup_entries() will search for and return a group of up |
Mike Rapoport | f144c39 | 2018-02-06 15:42:16 -0800 | [diff] [blame] | 1211 |  * to @nr_entries pages and shadow entries in the mapping. All
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1212 | * entries are placed in @pvec. pagevec_lookup_entries() takes a |
| 1213 | * reference against actual pages in @pvec. |
| 1214 | * |
| 1215 | * The search returns a group of mapping-contiguous entries with |
| 1216 | * ascending indexes. There may be holes in the indices due to |
| 1217 | * not-present entries. |
| 1218 | * |
Hugh Dickins | 71725ed | 2020-04-06 20:07:57 -0700 | [diff] [blame] | 1219 | * Only one subpage of a Transparent Huge Page is returned in one call: |
| 1220 | * allowing truncate_inode_pages_range() to evict the whole THP without |
| 1221 | * cycling through a pagevec of extra references. |
| 1222 | * |
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1223 | * pagevec_lookup_entries() returns the number of entries which were |
| 1224 | * found. |
| 1225 | */ |
| 1226 | unsigned pagevec_lookup_entries(struct pagevec *pvec, |
| 1227 | struct address_space *mapping, |
Randy Dunlap | e02a9f0 | 2018-01-31 16:21:19 -0800 | [diff] [blame] | 1228 | pgoff_t start, unsigned nr_entries, |
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1229 | pgoff_t *indices) |
| 1230 | { |
Randy Dunlap | e02a9f0 | 2018-01-31 16:21:19 -0800 | [diff] [blame] | 1231 | pvec->nr = find_get_entries(mapping, start, nr_entries, |
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1232 | pvec->pages, indices); |
| 1233 | return pagevec_count(pvec); |
| 1234 | } |
| 1235 | |
| 1236 | /** |
| 1237 | * pagevec_remove_exceptionals - pagevec exceptionals pruning |
| 1238 | * @pvec: The pagevec to prune |
| 1239 | * |
| 1240 | * pagevec_lookup_entries() fills both pages and exceptional radix |
| 1241 | * tree entries into the pagevec. This function prunes all |
| 1242 | * exceptionals from @pvec without leaving holes, so that it can be |
| 1243 | * passed on to page-only pagevec operations. |
| 1244 | */ |
| 1245 | void pagevec_remove_exceptionals(struct pagevec *pvec) |
| 1246 | { |
| 1247 | int i, j; |
| 1248 | |
| 1249 | for (i = 0, j = 0; i < pagevec_count(pvec); i++) { |
| 1250 | struct page *page = pvec->pages[i]; |
Matthew Wilcox | 3159f94 | 2017-11-03 13:30:42 -0400 | [diff] [blame] | 1251 | if (!xa_is_value(page)) |
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1252 | pvec->pages[j++] = page; |
| 1253 | } |
| 1254 | pvec->nr = j; |
| 1255 | } |
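A hedged sketch of the lookup-then-prune pattern these two helpers support (truncate-style caller; the function name is an assumption): shadow entries are stripped so the remaining, referenced pages can be handed to page-only operations and then released:

static void example_scan_one_batch(struct address_space *mapping, pgoff_t start)
{
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        unsigned nr;

        pagevec_init(&pvec);
        nr = pagevec_lookup_entries(&pvec, mapping, start,
                                    PAGEVEC_SIZE, indices);
        if (!nr)
                return;
        pagevec_remove_exceptionals(&pvec);     /* drop xa_is_value() entries */
        /* ... operate on the remaining real pages ... */
        pagevec_release(&pvec);                 /* drop the references taken */
}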
| 1256 | |
| 1257 | /** |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1258 | * pagevec_lookup_range - gang pagecache lookup |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1259 | * @pvec: Where the resulting pages are placed |
| 1260 | * @mapping: The address_space to search |
| 1261 | * @start: The starting page index |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1262 | * @end: The final page index |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1263 | * |
Randy Dunlap | e02a9f0 | 2018-01-31 16:21:19 -0800 | [diff] [blame] | 1264 | * pagevec_lookup_range() will search for & return a group of up to PAGEVEC_SIZE |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1265 |  * pages in the mapping starting from index @start and up to index @end
| 1266 |  * (inclusive). The pages are placed in @pvec. pagevec_lookup_range() takes a
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1267 | * reference against the pages in @pvec. |
| 1268 | * |
| 1269 | * The search returns a group of mapping-contiguous pages with ascending |
Jan Kara | d72dc8a | 2017-09-06 16:21:18 -0700 | [diff] [blame] | 1270 | * indexes. There may be holes in the indices due to not-present pages. We |
| 1271 | * also update @start to index the next page for the traversal. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1272 | * |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1273 | * pagevec_lookup_range() returns the number of pages which were found. If this |
Randy Dunlap | e02a9f0 | 2018-01-31 16:21:19 -0800 | [diff] [blame] | 1274 | * number is smaller than PAGEVEC_SIZE, the end of specified range has been |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1275 | * reached. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1276 | */ |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1277 | unsigned pagevec_lookup_range(struct pagevec *pvec, |
Jan Kara | 397162f | 2017-09-06 16:21:43 -0700 | [diff] [blame] | 1278 | struct address_space *mapping, pgoff_t *start, pgoff_t end) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1279 | { |
Jan Kara | 397162f | 2017-09-06 16:21:43 -0700 | [diff] [blame] | 1280 | pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE, |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1281 | pvec->pages); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1282 | return pagevec_count(pvec); |
| 1283 | } |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1284 | EXPORT_SYMBOL(pagevec_lookup_range); |
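Illustrative iteration over an index range using the updated @start as a cursor; everything except the pagevec calls is an assumption:

static unsigned long example_count_dirty(struct address_space *mapping,
                                         pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        unsigned long dirty = 0;
        unsigned i, nr;

        pagevec_init(&pvec);
        while ((nr = pagevec_lookup_range(&pvec, mapping, &start, end))) {
                for (i = 0; i < nr; i++)
                        if (PageDirty(pvec.pages[i]))
                                dirty++;
                pagevec_release(&pvec); /* drops the references, reinits pvec */
                cond_resched();
        }
        return dirty;
}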
Christoph Hellwig | 78539fd | 2006-01-11 20:47:41 +1100 | [diff] [blame] | 1285 | |
Jan Kara | 72b045a | 2017-11-15 17:34:33 -0800 | [diff] [blame] | 1286 | unsigned pagevec_lookup_range_tag(struct pagevec *pvec, |
| 1287 | struct address_space *mapping, pgoff_t *index, pgoff_t end, |
Matthew Wilcox | 10bbd23 | 2017-12-05 17:30:38 -0500 | [diff] [blame] | 1288 | xa_mark_t tag) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1289 | { |
Jan Kara | 72b045a | 2017-11-15 17:34:33 -0800 | [diff] [blame] | 1290 | pvec->nr = find_get_pages_range_tag(mapping, index, end, tag, |
Jan Kara | 67fd707 | 2017-11-15 17:35:19 -0800 | [diff] [blame] | 1291 | PAGEVEC_SIZE, pvec->pages); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1292 | return pagevec_count(pvec); |
| 1293 | } |
Jan Kara | 72b045a | 2017-11-15 17:34:33 -0800 | [diff] [blame] | 1294 | EXPORT_SYMBOL(pagevec_lookup_range_tag); |
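The tagged variant follows the same cursor pattern; a hedged writeback-style sketch restricted to pages marked dirty (names other than the pagevec calls and PAGECACHE_TAG_DIRTY are assumptions):

static void example_walk_dirty_tagged(struct address_space *mapping,
                                      pgoff_t index, pgoff_t end)
{
        struct pagevec pvec;
        unsigned i, nr;

        pagevec_init(&pvec);
        while ((nr = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
                                              PAGECACHE_TAG_DIRTY))) {
                for (i = 0; i < nr; i++) {
                        lock_page(pvec.pages[i]);
                        /* ... kick off writeback for the page here ... */
                        unlock_page(pvec.pages[i]);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}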
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1295 | |
Jan Kara | 93d3b71 | 2017-11-15 17:35:12 -0800 | [diff] [blame] | 1296 | unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec, |
| 1297 | struct address_space *mapping, pgoff_t *index, pgoff_t end, |
Matthew Wilcox | 10bbd23 | 2017-12-05 17:30:38 -0500 | [diff] [blame] | 1298 | xa_mark_t tag, unsigned max_pages) |
Jan Kara | 93d3b71 | 2017-11-15 17:35:12 -0800 | [diff] [blame] | 1299 | { |
| 1300 | pvec->nr = find_get_pages_range_tag(mapping, index, end, tag, |
| 1301 | min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages); |
| 1302 | return pagevec_count(pvec); |
| 1303 | } |
| 1304 | EXPORT_SYMBOL(pagevec_lookup_range_nr_tag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1305 | /* |
| 1306 | * Perform any setup for the swap system |
| 1307 | */ |
| 1308 | void __init swap_setup(void) |
| 1309 | { |
Arun KS | ca79b0c | 2018-12-28 00:34:29 -0800 | [diff] [blame] | 1310 | unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT); |
Peter Zijlstra | e0bf68d | 2007-10-16 23:25:46 -0700 | [diff] [blame] | 1311 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1312 | /* Use a smaller cluster for small-memory machines */ |
| 1313 | if (megs < 16) |
| 1314 | page_cluster = 2; |
| 1315 | else |
| 1316 | page_cluster = 3; |
| 1317 | /* |
| 1318 |  * Right now other parts of the system mean that we
| 1319 |  * _really_ don't want to cluster much more.
| 1320 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1321 | } |
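Worked example of the heuristic above, assuming 4 KiB pages (PAGE_SHIFT = 12): on an 8 MiB machine totalram_pages() is 2048, so megs = 2048 >> 8 = 8 < 16 and page_cluster becomes 2, i.e. swap readahead works in clusters of 2^2 = 4 pages; at 16 MiB or more, megs >= 16 and page_cluster becomes 3, i.e. clusters of 8 pages.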
John Hubbard | 07d8026 | 2020-01-30 22:12:28 -0800 | [diff] [blame] | 1322 | |
| 1323 | #ifdef CONFIG_DEV_PAGEMAP_OPS |
| 1324 | void put_devmap_managed_page(struct page *page) |
| 1325 | { |
| 1326 | int count; |
| 1327 | |
| 1328 | if (WARN_ON_ONCE(!page_is_devmap_managed(page))) |
| 1329 | return; |
| 1330 | |
| 1331 | count = page_ref_dec_return(page); |
| 1332 | |
| 1333 | /* |
| 1334 | * devmap page refcounts are 1-based, rather than 0-based: if |
| 1335 | * refcount is 1, then the page is free and the refcount is |
| 1336 | * stable because nobody holds a reference on the page. |
| 1337 | */ |
| 1338 | if (count == 1) |
| 1339 | free_devmap_managed_page(page); |
| 1340 | else if (!count) |
| 1341 | __put_page(page); |
| 1342 | } |
| 1343 | EXPORT_SYMBOL(put_devmap_managed_page); |
| 1344 | #endif |
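A hedged sketch of the 1-based rule documented above (the helper name is illustrative): a devmap-managed page whose refcount has dropped back to 1 has no outstanding users:

#ifdef CONFIG_DEV_PAGEMAP_OPS
static bool example_devmap_page_idle(struct page *page)
{
        /* refcount 1 == free for devmap-managed pages, per the comment above */
        return page_is_devmap_managed(page) && page_ref_count(page) == 1;
}
#endif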