// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *		Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory, there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
 *
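 * As a quick illustration with made-up numbers: suppose the inactive
 * list holds 1000 pages and the active list 4000. A page is evicted
 * when the eviction/activation counter reads E = 50000 and refaults
 * when it reads R = 53000, giving a refault distance of R - E = 3000.
 * Since 3000 <= NR_active (4000), the page could have stayed resident
 * had it been given active list space, so it is activated on refault.
 * Had the refault happened at R = 56000 instead, the distance of 6000
 * would exceed NR_active and the refault would not lead to activation.
 *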
 *
 *		Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *		Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 *		Implementation
 *
 * For each node's file LRU lists, a counter for inactive evictions
 * and activations is maintained (lruvec->inactive_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

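/*
 * A rough sketch of the entry that pack_shadow() below produces. Field
 * widths are configuration-dependent and this layout is illustrative
 * only; one further bit is consumed by xa_mk_value() to tag the entry
 * as a value, leaving BITS_PER_XA_VALUE bits for the fields:
 *
 *   MSB                                                          LSB
 *   +---------------------------+-----------------+------------+---+
 *   | eviction >> bucket_order  |    memcg ID     |  node ID   | W |
 *   +---------------------------+-----------------+------------+---+
 *     remaining timestamp bits    MEM_CGROUP_ID_    NODES_SHIFT   1
 *                                 SHIFT bits        bits
 *
 * unpack_shadow() reverses this, so a round trip recovers the memcg
 * ID, node and workingset bit exactly, and the eviction timestamp
 * truncated to a multiple of 1 << bucket_order.
 */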
static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction >>= bucket_order;
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << 1) | workingset;

	return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & 1;
	entry >>= 1;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
	*workingsetp = workingset;
}

static void advance_inactive_age(struct mem_cgroup *memcg, pg_data_t *pgdat)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		atomic_long_inc(&lruvec->inactive_age);
	} while (memcg && (memcg = parent_mem_cgroup(memcg)));
}


/**
 * workingset_eviction - note the eviction of a page from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @page->mapping->i_pages in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = page_pgdat(page);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Page is fully exclusive and pins page->mem_cgroup */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	advance_inactive_age(page_memcg(page), pgdat);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->inactive_age);
	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
}

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @page: the freshly allocated replacement page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct page *page, void *shadow)
{
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	bool workingset;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !eviction_memcg)
		goto out;
	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->inactive_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across inactive_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * inactive_age to lap a shadow entry in the field, which can
	 * then result in a false small refault distance, leading to a
	 * false activation should this old entry actually refault
	 * again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;
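	/*
	 * A worked example with made-up numbers, assuming bucket_order
	 * is 0 and an (unrealistically) narrow 8-bit timestamp field:
	 * a page evicted when inactive_age reads 250 that refaults at
	 * 260 - by which point the stored 8-bit view has wrapped to 4 -
	 * still yields (260 - 250) & 0xff = 10, the true distance. Only
	 * when the counter laps the entry by a full 256 events does the
	 * distance alias to a misleadingly small value, which is the
	 * caveat described above.
	 */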

	/*
	 * The activation decision for this page is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during page reclaim is being determined.
	 *
	 * However, the cgroup that will own the page is the one that
	 * is actually experiencing the refault event.
	 */
	memcg = page_memcg(page);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	inc_lruvec_state(lruvec, WORKINGSET_REFAULT);

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the page cache. Whether
	 * cache can compete with anon or not depends on having swap.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_ANON);
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
	}
	if (refault_distance > workingset_size)
		goto out;

	SetPageActive(page);
	advance_inactive_age(memcg, pgdat);
	inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);

	/* Page was active prior to eviction */
	if (workingset) {
		SetPageWorkingset(page);
		/* XXX: Move to lru_cache_add() when it supports new vs putback */
		spin_lock_irq(&page_pgdat(page)->lru_lock);
		lru_note_cost_page(page);
		spin_unlock_irq(&page_pgdat(page)->lru_lock);
		inc_lruvec_state(lruvec, WORKINGSET_RESTORE);
	}
out:
	rcu_read_unlock();
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = page_memcg_rcu(page);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	advance_inactive_age(memcg, page_pgdat(page));
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	VM_WARN_ON_ONCE(!irqs_disabled()); /* For __inc_lruvec_page_state */

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add(&shadow_nodes, &node->private_list);
			__inc_lruvec_slab_state(node, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del(&shadow_nodes, &node->private_list);
			__dec_lruvec_slab_state(node, WORKINGSET_NODES);
		}
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
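	/*
	 * Sanity-checking the figure above with illustrative numbers
	 * (4K pages, the stated 7 nodes per page, 64 slots per node):
	 * allowing one node per 8 pages of memory works out to
	 *
	 *	4096 / 7 / 64 * 8 / 4096 = 8 / 448 ~= 1.8%
	 *
	 * of memory spent on shadow nodes in the worst case.
	 */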
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(lruvec, NR_SLAB_RECLAIMABLE);
		pages += lruvec_page_state_local(lruvec, NR_SLAB_UNRECLAIMABLE);
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (!nodes)
		return SHRINK_EMPTY;

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	XA_STATE(xas, node->array, 0);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	__dec_lruvec_slab_state(node, WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	mapping->nrexceptional -= node->nr_values;
	xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
	xas.xa_offset = node->offset;
	xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
	xas_set_update(&xas, workingset_update_node);
	/*
	 * We could store a shadow entry here which was the minimum of the
	 * shadow entries we were tracking ...
	 */
	xas_store(&xas, NULL);
	__inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = 0, /* ->count reports only fully expendable nodes */
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
	       timestamp_bits, max_order, bucket_order);

	ret = prealloc_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err;
	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      &workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	register_shrinker_prepared(&workingset_shadow_shrinker);
	return 0;
err_list_lru:
	free_prealloced_shrinker(&workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);