// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/admin-guide/sysctl/vm.rst.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>
#include <linux/buffer_head.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/* Protecting only lru_rotate.pvec which requires disabling interrupts */
struct lru_rotate {
	local_lock_t lock;
	struct pagevec pvec;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/*
 * The following struct pagevecs are grouped together because they are
 * protected by disabling preemption (and interrupts remain enabled).
 */
struct lru_pvecs {
	local_lock_t lock;
	struct pagevec lru_add;
	struct pagevec lru_deactivate_file;
	struct pagevec lru_deactivate;
	struct pagevec lru_lazyfree;
#ifdef CONFIG_SMP
	struct pagevec activate_page;
#endif
};
static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
	.lock = INIT_LOCAL_LOCK(lock),
};
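
/*
 * A minimal sketch of the access pattern used by every helper below
 * that touches lru_pvecs:
 *
 *	struct pagevec *pvec;
 *
 *	local_lock(&lru_pvecs.lock);
 *	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
 *	... add a page, drain the pagevec if needed ...
 *	local_unlock(&lru_pvecs.lock);
 *
 * On !PREEMPT_RT kernels local_lock() compiles down to
 * preempt_disable(); on PREEMPT_RT it is a real per-CPU lock, which is
 * why a plain preempt_disable() is not used here.
 */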

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct folio *folio = page_folio(page);
		struct lruvec *lruvec;
		unsigned long flags;

		lruvec = folio_lruvec_lock_irqsave(folio, &flags);
		del_page_from_lru_list(page, lruvec);
		__clear_page_lru_flags(page);
		unlock_page_lruvec_irqrestore(lruvec, flags);
	}
	__ClearPageWaiters(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	mem_cgroup_uncharge(page_folio(page));
	free_unref_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
	/*
	 * __page_cache_release() is supposed to be called for thp, not for
	 * hugetlb.  This is because a hugetlb page never has PageLRU set
	 * (it is never added to any LRU list) and no memcg routines should
	 * be called for hugetlb (it has a separate hugetlb_cgroup).
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	destroy_compound_page(page);
}

void __put_page(struct page *page)
{
	if (is_zone_device_page(page)) {
		put_dev_pagemap(page->pgmap);

		/*
		 * The page belongs to the device that created pgmap. Do
		 * not return it to the page allocator.
		 */
		return;
	}

	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.
 */
void put_pages_list(struct list_head *pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, pages, lru) {
		if (!put_page_testzero(page)) {
			list_del(&page->lru);
			continue;
		}
		if (PageHead(page)) {
			list_del(&page->lru);
			__put_compound_page(page);
			continue;
		}
		/* Cannot be PageLRU because it's passed to us using the lru */
		__ClearPageWaiters(page);
	}

	free_unref_page_list(pages);
	INIT_LIST_HEAD(pages);
}
EXPORT_SYMBOL(put_pages_list);
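
/*
 * Example usage (a hypothetical caller, for illustration): pages
 * collected on a local list, with one reference held each, can be
 * released in a single call instead of looping over put_page():
 *
 *	LIST_HEAD(pages);
 *	...
 *	list_add(&page->lru, &pages);
 *	...
 *	put_pages_list(&pages);
 *
 * The list is reinitialized on return, so it can be reused directly.
 */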

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0.  If no pages
 * were pinned, returns -errno.  Each page returned must be released
 * with a put_page() call when the caller is finished with it.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);
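
/*
 * Example usage (hypothetical caller; assumes "buf" is a page-aligned
 * kernel address): pin a single PAGE_SIZE buffer as a struct page:
 *
 *	struct kvec kiov = { .iov_base = buf, .iov_len = PAGE_SIZE };
 *	struct page *page;
 *
 *	if (get_kernel_pages(&kiov, 1, 0, &page) == 1) {
 *		...
 *		put_page(page);
 *	}
 */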

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec))
{
	int i;
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct folio *folio = page_folio(page);

		/* block memcg migration during page moving between lru */
		if (!TestClearPageLRU(page))
			continue;

		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
		(*move_fn)(page, lruvec);

		SetPageLRU(page);
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
{
	struct folio *folio = page_folio(page);

	if (!folio_test_unevictable(folio)) {
		lruvec_del_folio(lruvec, folio);
		folio_clear_active(folio);
		lruvec_add_folio_tail(lruvec, folio);
		__count_vm_events(PGROTATED, folio_nr_pages(folio));
	}
}

/* return true if pagevec needs to drain */
static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
{
	bool ret = false;

	if (!pagevec_add(pvec, page) || PageCompound(page) ||
			lru_cache_disabled())
		ret = true;

	return ret;
}
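
/*
 * A pagevec is flushed as soon as pagevec_add() reports it full, when
 * a compound page is added (a THP occupies a single slot but stands
 * for many pages, so letting it linger would make the per-CPU caches
 * hold an outsized amount of memory), or while lru_cache_disabled()
 * is in effect and pages must reach the LRU immediately.
 */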

/*
 * Writeback is about to end against a folio which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it
 * to the tail of the inactive list.
 *
 * folio_rotate_reclaimable() must disable IRQs, to prevent nasty races.
 */
void folio_rotate_reclaimable(struct folio *folio)
{
	if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
	    !folio_test_unevictable(folio) && folio_test_lru(folio)) {
		struct pagevec *pvec;
		unsigned long flags;

		folio_get(folio);
		local_lock_irqsave(&lru_rotate.lock, flags);
		pvec = this_cpu_ptr(&lru_rotate.pvec);
		if (pagevec_add_and_need_flush(pvec, &folio->page))
			pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}
}

void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
{
	do {
		unsigned long lrusize;

		/*
		 * Holding lruvec->lru_lock is safe here because the lruvec
		 * is pinned: either
		 * 1) it is pinned by reclaim, or
		 * 2) we come from a pre-LRU page during refault (which also
		 *    holds the rcu lock, so it would be safe even if the
		 *    page was on the LRU and could move simultaneously to a
		 *    new lruvec).
		 */
		spin_lock_irq(&lruvec->lru_lock);
		/* Record cost event */
		if (file)
			lruvec->file_cost += nr_pages;
		else
			lruvec->anon_cost += nr_pages;

		/*
		 * Decay previous events
		 *
		 * Because workloads change over time (and to avoid
		 * overflow) we keep these statistics as a floating
		 * average, which ends up weighing recent refaults
		 * more than old ones.
		 */
		lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
			  lruvec_page_state(lruvec, NR_ACTIVE_FILE);

		if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
			lruvec->file_cost /= 2;
			lruvec->anon_cost /= 2;
		}
		spin_unlock_irq(&lruvec->lru_lock);
	} while ((lruvec = parent_lruvec(lruvec)));
}
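
/*
 * Worked example with illustrative numbers: if the four LRU lists sum
 * to lrusize = 4000 pages, the costs are decayed once
 * file_cost + anon_cost exceeds 1000.  With file_cost = 900 and
 * anon_cost = 200 the sum is 1100 > 1000, so both are halved to 450
 * and 100.  Recent refaults therefore always outweigh equally sized
 * bursts from the more distant past.
 */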

void lru_note_cost_folio(struct folio *folio)
{
	lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
			folio_nr_pages(folio));
}

static void __folio_activate(struct folio *folio, struct lruvec *lruvec)
{
	if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
		long nr_pages = folio_nr_pages(folio);

		lruvec_del_folio(lruvec, folio);
		folio_set_active(folio);
		lruvec_add_folio(lruvec, folio);
		trace_mm_lru_activate(folio);

		__count_vm_events(PGACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
				     nr_pages);
	}
}

#ifdef CONFIG_SMP
static void __activate_page(struct page *page, struct lruvec *lruvec)
{
	return __folio_activate(page_folio(page), lruvec);
}

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
}

static void folio_activate(struct folio *folio)
{
	if (folio_test_lru(folio) && !folio_test_active(folio) &&
	    !folio_test_unevictable(folio)) {
		struct pagevec *pvec;

		folio_get(folio);
		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.activate_page);
		if (pagevec_add_and_need_flush(pvec, &folio->page))
			pagevec_lru_move_fn(pvec, __activate_page);
		local_unlock(&lru_pvecs.lock);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static void folio_activate(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio_test_clear_lru(folio)) {
		lruvec = folio_lruvec_lock_irq(folio);
		__folio_activate(folio, lruvec);
		unlock_page_lruvec_irq(lruvec);
		folio_set_lru(folio);
	}
}
#endif

static void __lru_cache_activate_folio(struct folio *folio)
{
	struct pagevec *pvec;
	int i;

	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == &folio->page) {
			folio_set_active(folio);
			break;
		}
	}

	local_unlock(&lru_pvecs.lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible to others (and is thus
 * safe for non-atomic ops), __SetPageReferenced(page) may be substituted
 * for mark_page_accessed(page).
 */
void folio_mark_accessed(struct folio *folio)
{
	if (!folio_test_referenced(folio)) {
		folio_set_referenced(folio);
	} else if (folio_test_unevictable(folio)) {
		/*
		 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
		 * this list is never rotated or maintained, so marking an
		 * unevictable page accessed has no effect.
		 */
	} else if (!folio_test_active(folio)) {
		/*
		 * If the page is on the LRU, queue it for activation via
		 * lru_pvecs.activate_page. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (folio_test_lru(folio))
			folio_activate(folio);
		else
			__lru_cache_activate_folio(folio);
		folio_clear_referenced(folio);
		workingset_activation(folio);
	}
	if (folio_test_idle(folio))
		folio_clear_idle(folio);
}
EXPORT_SYMBOL(folio_mark_accessed);
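
/*
 * Illustrative walkthrough of the state machine above: a folio read
 * into the page cache starts out inactive,unreferenced.  The first
 * folio_mark_accessed() only sets the referenced flag; the second one
 * activates the folio (directly when it is already on the LRU, via
 * the local lru_add pagevec otherwise), clears the referenced flag,
 * and notes a workingset activation.
 */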

/**
 * folio_add_lru - Add a folio to an LRU list.
 * @folio: The folio to be added to the LRU.
 *
 * Queue the folio for addition to the LRU. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of folio_add_lru()
 * to have the folio added to the active list using folio_mark_accessed().
 */
void folio_add_lru(struct folio *folio)
{
	struct pagevec *pvec;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	folio_get(folio);
	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
	if (pagevec_add_and_need_flush(pvec, &folio->page))
		__pagevec_lru_add(pvec);
	local_unlock(&lru_pvecs.lock);
}
EXPORT_SYMBOL(folio_add_lru);
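
/*
 * Minimal usage sketch (hypothetical caller): a freshly allocated
 * folio is published to the LRU once it is fully set up:
 *
 *	struct folio *folio = folio_alloc(GFP_KERNEL, 0);
 *
 *	... charge and map the folio ...
 *	folio_add_lru(folio);
 */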

/**
 * lru_cache_add_inactive_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the inactive or unevictable LRU list, depending on its
 * evictability.
 */
void lru_cache_add_inactive_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	bool unevictable;

	VM_BUG_ON_PAGE(PageLRU(page), page);

	unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);
		/*
		 * We use the irq-unsafe __mod_zone_page_state because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (a spinlock), which implies preemption is
		 * disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
	}
	lru_cache_add(page);
}
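
/*
 * Note on the vm_flags test above: a page is treated as unevictable
 * only when its VMA is mlocked (VM_LOCKED) and not special.  VM_SPECIAL
 * covers mappings such as VM_IO and VM_PFNMAP, to which mlock semantics
 * do not apply.
 */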

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped and is dirty or under writeback, it
 * can be reclaimed ASAP by using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4 the page is moved to the head of the inactive list because
 * the VM expects it to be written out by flusher threads, as this is
 * much more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
{
	bool active = PageActive(page);
	int nr_pages = thp_nr_pages(page);

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	del_page_from_lru_list(page, lruvec);
	ClearPageActive(page);
	ClearPageReferenced(page);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim could be raced with end_page_writeback
		 * It can make readahead confusing.  But race window
		 * is _really_ small and it's non-critical problem.
		 */
		add_page_to_lru_list(page, lruvec);
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it was on the pagevec,
		 * so move the page to the tail of the inactive list.
		 */
		add_page_to_lru_list_tail(page, lruvec);
		__count_vm_events(PGROTATED, nr_pages);
	}

	if (active) {
		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
{
	if (PageActive(page) && !PageUnevictable(page)) {
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec);
		ClearPageActive(page);
		ClearPageReferenced(page);
		add_page_to_lru_list(page, lruvec);

		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
{
	if (PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * Lazyfree pages are clean anonymous pages.  They have the
		 * PG_swapbacked flag cleared, to distinguish them from
		 * normal anonymous pages.
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec);

		__count_vm_events(PGLAZYFREE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
				     nr_pages);
	}
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate.pvec, cpu);
	/* Disabling interrupts below acts as a compiler barrier. */
	if (data_race(pagevec_count(pvec))) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_lock_irqsave(&lru_rotate.lock, flags);
		pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}

	pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);

	pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn);

	pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_lazyfree_fn);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (such as one using
	 * mprotect), deactivating unevictable pages to accelerate reclaim
	 * is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);

		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
		local_unlock(&lru_pvecs.lock);
	}
}
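
/*
 * Typical caller, for illustration: invalidate_mapping_pages() hands
 * over pages it failed to drop, roughly:
 *
 *	if (!invalidate_inode_page(page))
 *		deactivate_file_page(page);
 */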

/*
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the active
 * list and was not an unevictable page.  This is done to accelerate the reclaim
 * of @page.
 */
void deactivate_page(struct page *page)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
		get_page(page);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn);
		local_unlock(&lru_pvecs.lock);
	}
}

/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to mark as lazyfree
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
		get_page(page);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
		local_unlock(&lru_pvecs.lock);
	}
}
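
/*
 * This is the MADV_FREE path: madvise(addr, len, MADV_FREE) walks the
 * advised range and, roughly, ends up calling mark_page_lazyfree() on
 * each clean anonymous page:
 *
 *	madvise_free_pte_range()
 *		-> mark_page_lazyfree(page)
 */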

void lru_add_drain(void)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&lru_pvecs.lock);
}

/*
 * In the SMP case this is called from per-cpu workqueue context, so
 * lru_add_drain_cpu() and invalidate_bh_lrus_cpu() run on the same
 * cpu.  The !SMP case is not a problem, since there is only one core
 * and the locks disable preemption.
 */
static void lru_add_and_bh_lrus_drain(void)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&lru_pvecs.lock);
	invalidate_bh_lrus_cpu();
}

void lru_add_drain_cpu_zone(struct zone *zone)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	drain_local_pages(zone);
	local_unlock(&lru_pvecs.lock);
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_and_bh_lrus_drain();
}

/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
inline void __lru_add_drain_all(bool force_all_cpus)
{
	/*
	 * lru_drain_gen - Global pages generation number
	 *
	 * (A) Definition: global lru_drain_gen = x implies that all generations
	 *     0 < n <= x are already *scheduled* for draining.
	 *
	 * This is an optimization for the highly-contended use case where a
	 * user space workload keeps constantly generating a flow of pages for
	 * each CPU.
	 */
	static unsigned int lru_drain_gen;
	static struct cpumask has_work;
	static DEFINE_MUTEX(lock);
	unsigned cpu, this_gen;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	/*
	 * Guarantee pagevec counter stores visible by this CPU are visible to
	 * other CPUs before loading the current drain generation.
	 */
	smp_mb();

	/*
	 * (B) Locally cache global LRU draining generation number
	 *
	 * The read barrier ensures that the counter is loaded before the mutex
	 * is taken. It pairs with smp_mb() inside the mutex critical section
	 * at (D).
	 */
	this_gen = smp_load_acquire(&lru_drain_gen);

	mutex_lock(&lock);

	/*
	 * (C) Exit the draining operation if a newer generation, from another
	 * lru_add_drain_all(), was already scheduled for draining. Check (A).
	 */
	if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
		goto done;

	/*
	 * (D) Increment global generation number
	 *
	 * Pairs with smp_load_acquire() at (B), outside of the critical
	 * section. Use a full memory barrier to guarantee that the new global
	 * drain generation number is stored before loading pagevec counters.
	 *
	 * This pairing must be done here, before the for_each_online_cpu loop
	 * below which drains the page vectors.
	 *
	 * Let x, y, and z represent some system CPU numbers, where x < y < z.
	 * Assume CPU #z is in the middle of the for_each_online_cpu loop
	 * below and has already reached CPU #y's per-cpu data. CPU #x comes
	 * along, adds some pages to its per-cpu vectors, then calls
	 * lru_add_drain_all().
	 *
	 * If the paired barrier is done at any later step, e.g. after the
	 * loop, CPU #x will just exit at (C) and miss flushing out all of its
	 * added pages.
	 */
	WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
	smp_mb();

	cpumask_clear(&has_work);
	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (force_all_cpus ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
		    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
		    need_activate_page_drain(cpu) ||
		    has_bh_in_lru(cpu, NULL)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			__cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
	mutex_unlock(&lock);
}

void lru_add_drain_all(void)
{
	__lru_add_drain_all(false);
}
Michal Hocko | 6ea183d | 2019-02-20 22:19:54 -0800 | [diff] [blame] | 859 | #else |
| 860 | void lru_add_drain_all(void) |
| 861 | { |
| 862 | lru_add_drain(); |
| 863 | } |
Ahmed S. Darwish | 6446a51 | 2020-08-27 13:40:38 +0200 | [diff] [blame] | 864 | #endif /* CONFIG_SMP */ |
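
The generation scheme above can be studied in isolation. Below is a minimal
userspace sketch, assuming C11 atomics and a pthread mutex stand in for
smp_load_acquire()/smp_mb() and the kernel mutex; drain_gen, drain_lock and
drain_all() are illustrative names, not kernel API:

#include <pthread.h>
#include <stdatomic.h>

static atomic_uint drain_gen;		/* models lru_drain_gen */
static pthread_mutex_t drain_lock = PTHREAD_MUTEX_INITIALIZER;

static void drain_all(void)
{
	/* (B) snapshot the generation before taking the lock */
	unsigned int this_gen =
		atomic_load_explicit(&drain_gen, memory_order_acquire);

	pthread_mutex_lock(&drain_lock);

	/* (C) a newer generation was already scheduled: piggyback on it */
	if (this_gen != atomic_load_explicit(&drain_gen,
					     memory_order_relaxed)) {
		pthread_mutex_unlock(&drain_lock);
		return;
	}

	/* (D) publish the new generation before scanning per-CPU state */
	atomic_fetch_add_explicit(&drain_gen, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */

	/* ... walk and flush the per-CPU queues here ... */

	pthread_mutex_unlock(&drain_lock);
}

A caller that raced with an in-flight drain returns early at (C) instead of
queuing duplicate work, which is the entire payoff of the generation counter.
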
Nick Piggin | 053837f | 2006-01-18 17:42:27 -0800 | [diff] [blame] | 865 | |
Minchan Kim | d479960e | 2021-05-04 18:36:54 -0700 | [diff] [blame] | 866 | atomic_t lru_disable_count = ATOMIC_INIT(0); |
| 867 | |
| 868 | /* |
| 869 | * lru_cache_disable() needs to be called before we start compiling |
| 870 | * a list of pages to be migrated using isolate_lru_page(). |
| 871 |  * It drains the pages on the LRU cache and then disables it on all CPUs
| 872 |  * until lru_cache_enable() is called.
| 873 | * |
| 874 | * Must be paired with a call to lru_cache_enable(). |
| 875 | */ |
| 876 | void lru_cache_disable(void) |
| 877 | { |
| 878 | atomic_inc(&lru_disable_count); |
| 879 | #ifdef CONFIG_SMP |
| 880 | /* |
| 881 | 	 * lru_add_drain_all() in force mode schedules draining on all
| 882 | 	 * online CPUs, so any call to lru_cache_disabled() wrapped in
| 883 | 	 * local_lock or with preemption disabled is ordered by that.
| 884 | 	 * The atomic operation does not need stronger ordering
| 885 | 	 * requirements because that is enforced by the scheduling
| 886 | 	 * guarantees.
| 887 | */ |
| 888 | __lru_add_drain_all(true); |
| 889 | #else |
Minchan Kim | 243418e | 2021-09-24 15:43:47 -0700 | [diff] [blame] | 890 | lru_add_and_bh_lrus_drain(); |
Minchan Kim | d479960e | 2021-05-04 18:36:54 -0700 | [diff] [blame] | 891 | #endif |
| 892 | } |
| 893 | |
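A hedged usage sketch of the pairing rule above; the caller, its name and the
elided migration step are hypothetical, but isolate_lru_page() and the
disable/enable pairing follow the comments above:

/* hypothetical caller: collect one page for migration */
static void collect_page_for_migration(struct page *page,
				       struct list_head *pagelist)
{
	lru_cache_disable();		/* drains, then keeps caches off */
	if (!isolate_lru_page(page))	/* returns 0 on success */
		list_add_tail(&page->lru, pagelist);
	/* ... hand @pagelist to the migration machinery ... */
	lru_cache_enable();		/* must pair with the disable above */
}
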
Michal Hocko | aabfb57 | 2014-10-09 15:28:52 -0700 | [diff] [blame] | 894 | /** |
Kirill A. Shutemov | ea1754a | 2016-04-01 15:29:48 +0300 | [diff] [blame] | 895 | * release_pages - batched put_page() |
Michal Hocko | aabfb57 | 2014-10-09 15:28:52 -0700 | [diff] [blame] | 896 | * @pages: array of pages to release |
| 897 | * @nr: number of pages |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 898 | * |
Michal Hocko | aabfb57 | 2014-10-09 15:28:52 -0700 | [diff] [blame] | 899 | * Decrement the reference count on all the pages in @pages. If it |
| 900 |  * falls to zero, remove the page from the LRU and free it.
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 901 | */ |
Mel Gorman | c6f92f9 | 2017-11-15 17:37:55 -0800 | [diff] [blame] | 902 | void release_pages(struct page **pages, int nr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 903 | { |
| 904 | int i; |
Konstantin Khlebnikov | cc59850 | 2012-01-10 15:07:04 -0800 | [diff] [blame] | 905 | LIST_HEAD(pages_to_free); |
Alex Shi | 6168d0d | 2020-12-15 12:34:29 -0800 | [diff] [blame] | 906 | struct lruvec *lruvec = NULL; |
Matthew Wilcox (Oracle) | 0de340c | 2021-06-29 22:27:31 -0400 | [diff] [blame] | 907 | unsigned long flags = 0; |
Kees Cook | 3f649ab | 2020-06-03 13:09:38 -0700 | [diff] [blame] | 908 | unsigned int lock_batch; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 909 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 910 | for (i = 0; i < nr; i++) { |
| 911 | struct page *page = pages[i]; |
Matthew Wilcox (Oracle) | 0de340c | 2021-06-29 22:27:31 -0400 | [diff] [blame] | 912 | struct folio *folio = page_folio(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 913 | |
Michal Hocko | aabfb57 | 2014-10-09 15:28:52 -0700 | [diff] [blame] | 914 | /* |
| 915 | * Make sure the IRQ-safe lock-holding time does not get |
| 916 | * excessive with a continuous string of pages from the |
Alex Shi | 6168d0d | 2020-12-15 12:34:29 -0800 | [diff] [blame] | 917 | * same lruvec. The lock is held only if lruvec != NULL. |
Michal Hocko | aabfb57 | 2014-10-09 15:28:52 -0700 | [diff] [blame] | 918 | */ |
Alex Shi | 6168d0d | 2020-12-15 12:34:29 -0800 | [diff] [blame] | 919 | if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) { |
| 920 | unlock_page_lruvec_irqrestore(lruvec, flags); |
| 921 | lruvec = NULL; |
Michal Hocko | aabfb57 | 2014-10-09 15:28:52 -0700 | [diff] [blame] | 922 | } |
| 923 | |
Matthew Wilcox (Oracle) | 0de340c | 2021-06-29 22:27:31 -0400 | [diff] [blame] | 924 | page = &folio->page; |
Aaron Lu | 6fcb52a | 2016-10-07 17:00:08 -0700 | [diff] [blame] | 925 | if (is_huge_zero_page(page)) |
Kirill A. Shutemov | aa88b68 | 2016-04-28 16:18:27 -0700 | [diff] [blame] | 926 | continue; |
Kirill A. Shutemov | aa88b68 | 2016-04-28 16:18:27 -0700 | [diff] [blame] | 927 | |
Ira Weiny | c5d6c45 | 2019-06-05 14:49:22 -0700 | [diff] [blame] | 928 | if (is_zone_device_page(page)) { |
Alex Shi | 6168d0d | 2020-12-15 12:34:29 -0800 | [diff] [blame] | 929 | if (lruvec) { |
| 930 | unlock_page_lruvec_irqrestore(lruvec, flags); |
| 931 | lruvec = NULL; |
Jérôme Glisse | df6ad69 | 2017-09-08 16:12:24 -0700 | [diff] [blame] | 932 | } |
Ira Weiny | c5d6c45 | 2019-06-05 14:49:22 -0700 | [diff] [blame] | 933 | /* |
| 934 | * ZONE_DEVICE pages that return 'false' from |
Miaohe Lin | a3e7bea | 2020-10-13 16:52:15 -0700 | [diff] [blame] | 935 | * page_is_devmap_managed() do not require special |
Ira Weiny | c5d6c45 | 2019-06-05 14:49:22 -0700 | [diff] [blame] | 936 | 			 * processing and instead expect a call to
| 937 | * put_page_testzero(). |
| 938 | */ |
John Hubbard | 07d8026 | 2020-01-30 22:12:28 -0800 | [diff] [blame] | 939 | if (page_is_devmap_managed(page)) { |
| 940 | put_devmap_managed_page(page); |
Ira Weiny | c5d6c45 | 2019-06-05 14:49:22 -0700 | [diff] [blame] | 941 | continue; |
John Hubbard | 07d8026 | 2020-01-30 22:12:28 -0800 | [diff] [blame] | 942 | } |
Ralph Campbell | 43fbdeb | 2020-12-14 19:05:55 -0800 | [diff] [blame] | 943 | if (put_page_testzero(page)) |
| 944 | put_dev_pagemap(page->pgmap); |
| 945 | continue; |
Jérôme Glisse | df6ad69 | 2017-09-08 16:12:24 -0700 | [diff] [blame] | 946 | } |
| 947 | |
Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 948 | if (!put_page_testzero(page)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 949 | continue; |
| 950 | |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 951 | if (PageCompound(page)) { |
Alex Shi | 6168d0d | 2020-12-15 12:34:29 -0800 | [diff] [blame] | 952 | if (lruvec) { |
| 953 | unlock_page_lruvec_irqrestore(lruvec, flags); |
| 954 | lruvec = NULL; |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 955 | } |
| 956 | __put_compound_page(page); |
| 957 | continue; |
| 958 | } |
| 959 | |
Nick Piggin | 46453a6 | 2006-03-22 00:07:58 -0800 | [diff] [blame] | 960 | if (PageLRU(page)) { |
Alexander Duyck | 2a5e4e3 | 2020-12-15 12:34:33 -0800 | [diff] [blame] | 961 | struct lruvec *prev_lruvec = lruvec; |
Lee Schermerhorn | 894bc31 | 2008-10-18 20:26:39 -0700 | [diff] [blame] | 962 | |
Matthew Wilcox (Oracle) | 0de340c | 2021-06-29 22:27:31 -0400 | [diff] [blame] | 963 | lruvec = folio_lruvec_relock_irqsave(folio, lruvec, |
Alexander Duyck | 2a5e4e3 | 2020-12-15 12:34:33 -0800 | [diff] [blame] | 964 | &flags); |
| 965 | if (prev_lruvec != lruvec) |
Michal Hocko | aabfb57 | 2014-10-09 15:28:52 -0700 | [diff] [blame] | 966 | lock_batch = 0; |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 967 | |
Yu Zhao | 46ae6b2 | 2021-02-24 12:08:25 -0800 | [diff] [blame] | 968 | del_page_from_lru_list(page, lruvec); |
Yu Zhao | 8756017 | 2021-02-24 12:08:28 -0800 | [diff] [blame] | 969 | __clear_page_lru_flags(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 970 | } |
Nick Piggin | 46453a6 | 2006-03-22 00:07:58 -0800 | [diff] [blame] | 971 | |
Nicholas Piggin | 6290602 | 2016-12-25 13:00:30 +1000 | [diff] [blame] | 972 | __ClearPageWaiters(page); |
Mel Gorman | c53954a | 2013-07-03 15:02:34 -0700 | [diff] [blame] | 973 | |
Konstantin Khlebnikov | cc59850 | 2012-01-10 15:07:04 -0800 | [diff] [blame] | 974 | list_add(&page->lru, &pages_to_free); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 975 | } |
Alex Shi | 6168d0d | 2020-12-15 12:34:29 -0800 | [diff] [blame] | 976 | if (lruvec) |
| 977 | unlock_page_lruvec_irqrestore(lruvec, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 978 | |
Johannes Weiner | 747db95 | 2014-08-08 14:19:24 -0700 | [diff] [blame] | 979 | mem_cgroup_uncharge_list(&pages_to_free); |
Mel Gorman | 2d4894b | 2017-11-15 17:37:59 -0800 | [diff] [blame] | 980 | free_unref_page_list(&pages_to_free); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 981 | } |
Miklos Szeredi | 0be8557 | 2010-10-27 15:34:46 -0700 | [diff] [blame] | 982 | EXPORT_SYMBOL(release_pages); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 983 | |
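Each plain put_page() that frees an LRU page may take the IRQ-safe lruvec
lock on its own, whereas release_pages() batches those acquisitions (the
lock_batch logic above). A hedged sketch of the caller side, with a
hypothetical helper name:

/* hypothetical helper: drop one reference on each page in @pages */
static void put_pages_batched(struct page **pages, int nr)
{
	/*
	 * Behaviourally equivalent to
	 *	while (nr--)
	 *		put_page(pages[nr]);
	 * but with batched lruvec locking, as implemented above.
	 */
	release_pages(pages, nr);
}
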
| 984 | /* |
| 985 | * The pages which we're about to release may be in the deferred lru-addition |
| 986 | * queues. That would prevent them from really being freed right now. That's |
| 987 | * OK from a correctness point of view but is inefficient - those pages may be |
| 988 | * cache-warm and we want to give them back to the page allocator ASAP. |
| 989 | * |
| 990 |  * So __pagevec_release() will drain those queues here.
| 991 |  * __pagevec_lru_add() calls release_pages() directly to avoid
| 992 |  * mutual recursion.
| 993 | */ |
| 994 | void __pagevec_release(struct pagevec *pvec) |
| 995 | { |
Mel Gorman | 7f0b5fb | 2017-11-15 17:38:10 -0800 | [diff] [blame] | 996 | if (!pvec->percpu_pvec_drained) { |
Mel Gorman | d9ed0d0 | 2017-11-15 17:37:48 -0800 | [diff] [blame] | 997 | lru_add_drain(); |
Mel Gorman | 7f0b5fb | 2017-11-15 17:38:10 -0800 | [diff] [blame] | 998 | pvec->percpu_pvec_drained = true; |
Mel Gorman | d9ed0d0 | 2017-11-15 17:37:48 -0800 | [diff] [blame] | 999 | } |
Mel Gorman | c6f92f9 | 2017-11-15 17:37:55 -0800 | [diff] [blame] | 1000 | release_pages(pvec->pages, pagevec_count(pvec)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1001 | pagevec_reinit(pvec); |
| 1002 | } |
Steve French | 7f28570 | 2005-11-01 10:22:55 -0800 | [diff] [blame] | 1003 | EXPORT_SYMBOL(__pagevec_release); |
| 1004 | |
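Most callers are expected to go through the pagevec_release() wrapper
(assuming the usual linux/pagevec.h inline that skips empty pagevecs); a
hedged sketch of the direct pattern:

	struct pagevec pvec;

	pagevec_init(&pvec);
	/* ... pagevec_add(&pvec, page) until the pagevec fills up ... */
	__pagevec_release(&pvec);	/* drain once, then batched release */
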
Matthew Wilcox (Oracle) | 934387c | 2021-05-14 15:08:29 -0400 | [diff] [blame] | 1005 | static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec) |
Shaohua Li | 3dd7ae8 | 2011-03-22 16:33:45 -0700 | [diff] [blame] | 1006 | { |
Matthew Wilcox (Oracle) | 934387c | 2021-05-14 15:08:29 -0400 | [diff] [blame] | 1007 | int was_unevictable = folio_test_clear_unevictable(folio); |
| 1008 | long nr_pages = folio_nr_pages(folio); |
Shaohua Li | 3dd7ae8 | 2011-03-22 16:33:45 -0700 | [diff] [blame] | 1009 | |
Matthew Wilcox (Oracle) | 934387c | 2021-05-14 15:08:29 -0400 | [diff] [blame] | 1010 | VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); |
Shaohua Li | 3dd7ae8 | 2011-03-22 16:33:45 -0700 | [diff] [blame] | 1011 | |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1012 | /* |
Matthew Wilcox (Oracle) | 934387c | 2021-05-14 15:08:29 -0400 | [diff] [blame] | 1013 | * A folio becomes evictable in two ways: |
Peng Fan | dae966d | 2019-05-13 17:19:26 -0700 | [diff] [blame] | 1014 | * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()]. |
Matthew Wilcox (Oracle) | 934387c | 2021-05-14 15:08:29 -0400 | [diff] [blame] | 1015 | * 2) Before acquiring LRU lock to put the folio on the correct LRU |
| 1016 | * and then |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1017 | * a) do PageLRU check with lock [check_move_unevictable_pages] |
| 1018 | * b) do PageLRU check before lock [clear_page_mlock] |
| 1019 | * |
| 1020 | * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need |
| 1021 | * following strict ordering: |
| 1022 | * |
| 1023 | * #0: __pagevec_lru_add_fn #1: clear_page_mlock |
| 1024 | * |
Matthew Wilcox (Oracle) | 934387c | 2021-05-14 15:08:29 -0400 | [diff] [blame] | 1025 | * folio_set_lru() folio_test_clear_mlocked() |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1026 | * smp_mb() // explicit ordering // above provides strict |
| 1027 | * // ordering |
Matthew Wilcox (Oracle) | 934387c | 2021-05-14 15:08:29 -0400 | [diff] [blame] | 1028 | * folio_test_mlocked() folio_test_lru() |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1029 | * |
| 1030 | * |
Matthew Wilcox (Oracle) | 934387c | 2021-05-14 15:08:29 -0400 | [diff] [blame] | 1031 | 	 * If '#1' does not observe the setting of PG_lru by '#0' and
| 1032 | 	 * fails the isolation, the explicit barrier makes sure that the
| 1033 | 	 * folio_evictable() check will put the folio on the correct
| 1034 | 	 * LRU. Without smp_mb(), folio_set_lru() can be reordered after
| 1035 | 	 * the folio_test_mlocked() check and can make '#1' fail the
| 1036 | 	 * isolation of a folio whose mlocked bit was just cleared ('#0'
| 1037 | 	 * is also looking at the same folio), leaving the evictable folio
| 1038 | 	 * stranded on an unevictable LRU.
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1039 | */ |
Matthew Wilcox (Oracle) | 934387c | 2021-05-14 15:08:29 -0400 | [diff] [blame] | 1040 | folio_set_lru(folio); |
Yang Shi | 9a9b6cc | 2020-04-01 21:06:23 -0700 | [diff] [blame] | 1041 | smp_mb__after_atomic(); |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1042 | |
Matthew Wilcox (Oracle) | 934387c | 2021-05-14 15:08:29 -0400 | [diff] [blame] | 1043 | if (folio_evictable(folio)) { |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1044 | if (was_unevictable) |
Shakeel Butt | 5d91f31 | 2020-06-03 16:03:16 -0700 | [diff] [blame] | 1045 | __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages); |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1046 | } else { |
Matthew Wilcox (Oracle) | 934387c | 2021-05-14 15:08:29 -0400 | [diff] [blame] | 1047 | folio_clear_active(folio); |
| 1048 | folio_set_unevictable(folio); |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1049 | if (!was_unevictable) |
Shakeel Butt | 5d91f31 | 2020-06-03 16:03:16 -0700 | [diff] [blame] | 1050 | __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages); |
Shakeel Butt | 9c4e6b1 | 2018-02-21 14:45:28 -0800 | [diff] [blame] | 1051 | } |
| 1052 | |
Matthew Wilcox (Oracle) | 934387c | 2021-05-14 15:08:29 -0400 | [diff] [blame] | 1053 | lruvec_add_folio(lruvec, folio); |
| 1054 | trace_mm_lru_insertion(folio); |
Shaohua Li | 3dd7ae8 | 2011-03-22 16:33:45 -0700 | [diff] [blame] | 1055 | } |
| 1056 | |
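The (2b) race above is the classic store-buffering litmus test. A userspace
sketch, assuming C11 seq-cst atomics model the page-flag bitops and smp_mb();
lru and mlocked are illustrative stand-ins for PG_lru and PG_mlocked:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool lru;			/* models PG_lru     */
static atomic_bool mlocked = true;	/* models PG_mlocked */

/* thread 0: models __pagevec_lru_add_fn() */
static void adder(void)
{
	atomic_store(&lru, true);			/* folio_set_lru() */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb()        */
	if (!atomic_load(&mlocked))
		;	/* evictable: adder puts the folio on the right LRU */
}

/* thread 1: models clear_page_mlock() */
static void clearer(void)
{
	/* seq-cst RMW: the test-and-clear provides the strict ordering */
	if (atomic_exchange(&mlocked, false) && atomic_load(&lru))
		;	/* isolation succeeds: clearer moves the folio */
}

/*
 * The forbidden outcome is both loads reading stale values: adder seeing
 * mlocked == true *and* clearer seeing lru == false. Seq-cst ordering
 * guarantees at least one side observes the other's store, so one of the
 * two paths always rescues the folio.
 */
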
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1057 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1058 | * Add the passed pages to the LRU, then drop the caller's refcount |
| 1059 | * on them. Reinitialises the caller's pagevec. |
| 1060 | */ |
Mel Gorman | a0b8cab3 | 2013-07-03 15:02:32 -0700 | [diff] [blame] | 1061 | void __pagevec_lru_add(struct pagevec *pvec) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1062 | { |
Alex Shi | fc574c2 | 2020-12-15 12:34:25 -0800 | [diff] [blame] | 1063 | int i; |
Alex Shi | 6168d0d | 2020-12-15 12:34:29 -0800 | [diff] [blame] | 1064 | struct lruvec *lruvec = NULL; |
Alex Shi | fc574c2 | 2020-12-15 12:34:25 -0800 | [diff] [blame] | 1065 | unsigned long flags = 0; |
| 1066 | |
| 1067 | for (i = 0; i < pagevec_count(pvec); i++) { |
Matthew Wilcox (Oracle) | 934387c | 2021-05-14 15:08:29 -0400 | [diff] [blame] | 1068 | struct folio *folio = page_folio(pvec->pages[i]); |
Alex Shi | fc574c2 | 2020-12-15 12:34:25 -0800 | [diff] [blame] | 1069 | |
Matthew Wilcox (Oracle) | 0de340c | 2021-06-29 22:27:31 -0400 | [diff] [blame] | 1070 | lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags); |
Matthew Wilcox (Oracle) | 934387c | 2021-05-14 15:08:29 -0400 | [diff] [blame] | 1071 | __pagevec_lru_add_fn(folio, lruvec); |
Alex Shi | fc574c2 | 2020-12-15 12:34:25 -0800 | [diff] [blame] | 1072 | } |
Alex Shi | 6168d0d | 2020-12-15 12:34:29 -0800 | [diff] [blame] | 1073 | if (lruvec) |
| 1074 | unlock_page_lruvec_irqrestore(lruvec, flags); |
Alex Shi | fc574c2 | 2020-12-15 12:34:25 -0800 | [diff] [blame] | 1075 | release_pages(pvec->pages, pvec->nr); |
| 1076 | pagevec_reinit(pvec); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1077 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1078 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1079 | /** |
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1080 | * pagevec_remove_exceptionals - pagevec exceptionals pruning |
| 1081 | * @pvec: The pagevec to prune |
| 1082 | * |
Matthew Wilcox (Oracle) | a656a20 | 2021-02-25 17:16:14 -0800 | [diff] [blame] | 1083 | * find_get_entries() fills both pages and XArray value entries (aka |
| 1084 | * exceptional entries) into the pagevec. This function prunes all |
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1085 | * exceptionals from @pvec without leaving holes, so that it can be |
| 1086 | * passed on to page-only pagevec operations. |
| 1087 | */ |
| 1088 | void pagevec_remove_exceptionals(struct pagevec *pvec) |
| 1089 | { |
| 1090 | int i, j; |
| 1091 | |
| 1092 | for (i = 0, j = 0; i < pagevec_count(pvec); i++) { |
| 1093 | struct page *page = pvec->pages[i]; |
Matthew Wilcox | 3159f94 | 2017-11-03 13:30:42 -0400 | [diff] [blame] | 1094 | if (!xa_is_value(page)) |
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1095 | pvec->pages[j++] = page; |
| 1096 | } |
| 1097 | pvec->nr = j; |
| 1098 | } |
| 1099 | |
| 1100 | /** |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1101 | * pagevec_lookup_range - gang pagecache lookup |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1102 | * @pvec: Where the resulting pages are placed |
| 1103 | * @mapping: The address_space to search |
| 1104 | * @start: The starting page index |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1105 | * @end: The final page index |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1106 | * |
Randy Dunlap | e02a9f0 | 2018-01-31 16:21:19 -0800 | [diff] [blame] | 1107 | * pagevec_lookup_range() will search for & return a group of up to PAGEVEC_SIZE |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1108 |  * pages in the mapping starting from index @start and up to index @end
| 1109 |  * (inclusive). The pages are placed in @pvec. pagevec_lookup_range() takes a
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1110 | * reference against the pages in @pvec. |
| 1111 | * |
| 1112 | * The search returns a group of mapping-contiguous pages with ascending |
Jan Kara | d72dc8a | 2017-09-06 16:21:18 -0700 | [diff] [blame] | 1113 | * indexes. There may be holes in the indices due to not-present pages. We |
| 1114 | * also update @start to index the next page for the traversal. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1115 | * |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1116 | * pagevec_lookup_range() returns the number of pages which were found. If this |
Randy Dunlap | e02a9f0 | 2018-01-31 16:21:19 -0800 | [diff] [blame] | 1117 | * number is smaller than PAGEVEC_SIZE, the end of specified range has been |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1118 | * reached. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1119 | */ |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1120 | unsigned pagevec_lookup_range(struct pagevec *pvec, |
Jan Kara | 397162f | 2017-09-06 16:21:43 -0700 | [diff] [blame] | 1121 | struct address_space *mapping, pgoff_t *start, pgoff_t end) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1122 | { |
Jan Kara | 397162f | 2017-09-06 16:21:43 -0700 | [diff] [blame] | 1123 | pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE, |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1124 | pvec->pages); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1125 | return pagevec_count(pvec); |
| 1126 | } |
Jan Kara | b947cee | 2017-09-06 16:21:21 -0700 | [diff] [blame] | 1127 | EXPORT_SYMBOL(pagevec_lookup_range); |
Christoph Hellwig | 78539fd | 2006-01-11 20:47:41 +1100 | [diff] [blame] | 1128 | |
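Since @start is advanced on return and a short count marks the end of the
range, the natural calling pattern is a loop; a hedged sketch in which
walk_mapping_range() and inspect_page() are hypothetical placeholders:

static void walk_mapping_range(struct address_space *mapping,
			       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec);
	while (pagevec_lookup_range(&pvec, mapping, &start, end)) {
		for (i = 0; i < pagevec_count(&pvec); i++)
			inspect_page(pvec.pages[i]);	/* placeholder */
		pagevec_release(&pvec);	/* drop the references taken */
		cond_resched();
	}
}
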
Jan Kara | 72b045a | 2017-11-15 17:34:33 -0800 | [diff] [blame] | 1129 | unsigned pagevec_lookup_range_tag(struct pagevec *pvec, |
| 1130 | struct address_space *mapping, pgoff_t *index, pgoff_t end, |
Matthew Wilcox | 10bbd23 | 2017-12-05 17:30:38 -0500 | [diff] [blame] | 1131 | xa_mark_t tag) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1132 | { |
Jan Kara | 72b045a | 2017-11-15 17:34:33 -0800 | [diff] [blame] | 1133 | pvec->nr = find_get_pages_range_tag(mapping, index, end, tag, |
Jan Kara | 67fd707 | 2017-11-15 17:35:19 -0800 | [diff] [blame] | 1134 | PAGEVEC_SIZE, pvec->pages); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1135 | return pagevec_count(pvec); |
| 1136 | } |
Jan Kara | 72b045a | 2017-11-15 17:34:33 -0800 | [diff] [blame] | 1137 | EXPORT_SYMBOL(pagevec_lookup_range_tag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1138 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1139 | /* |
| 1140 | * Perform any setup for the swap system |
| 1141 | */ |
| 1142 | void __init swap_setup(void) |
| 1143 | { |
Arun KS | ca79b0c | 2018-12-28 00:34:29 -0800 | [diff] [blame] | 1144 | unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT); |
Peter Zijlstra | e0bf68d | 2007-10-16 23:25:46 -0700 | [diff] [blame] | 1145 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1146 | /* Use a smaller cluster for small-memory machines */ |
| 1147 | if (megs < 16) |
| 1148 | page_cluster = 2; |
| 1149 | else |
| 1150 | page_cluster = 3; |
| 1151 | /* |
| 1152 | 	 * Right now other parts of the system mean that we
| 1153 | * _really_ don't want to cluster much more |
| 1154 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1155 | } |
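
A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12): totalram_pages() >> 8
converts a page count to MiB, so a 1 GiB machine computes megs == 1024 and picks
page_cluster = 3; since swapin readahead uses page_cluster as a shift, that
means clusters of 1 << 3 = 8 pages, while machines under 16 MiB fall back to
1 << 2 = 4 pages.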
John Hubbard | 07d8026 | 2020-01-30 22:12:28 -0800 | [diff] [blame] | 1156 | |
| 1157 | #ifdef CONFIG_DEV_PAGEMAP_OPS |
| 1158 | void put_devmap_managed_page(struct page *page) |
| 1159 | { |
| 1160 | int count; |
| 1161 | |
| 1162 | if (WARN_ON_ONCE(!page_is_devmap_managed(page))) |
| 1163 | return; |
| 1164 | |
| 1165 | count = page_ref_dec_return(page); |
| 1166 | |
| 1167 | /* |
| 1168 | * devmap page refcounts are 1-based, rather than 0-based: if |
| 1169 | * refcount is 1, then the page is free and the refcount is |
| 1170 | * stable because nobody holds a reference on the page. |
| 1171 | */ |
| 1172 | if (count == 1) |
| 1173 | free_devmap_managed_page(page); |
| 1174 | else if (!count) |
| 1175 | __put_page(page); |
| 1176 | } |
| 1177 | EXPORT_SYMBOL(put_devmap_managed_page); |
| 1178 | #endif |
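
Restating the 1-based rule above as a transition table (a sketch of the
decision in put_devmap_managed_page(), not additional behaviour):

/*
 *   refcount before the put  ->  page_ref_dec_return()  ->  action
 *             3                          2                  nothing more
 *             2                          1                  free_devmap_managed_page()
 *             1                          0                  __put_page()
 */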