Thomas Gleixner | 457c899 | 2019-05-19 13:08:55 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* |
| 3 | * linux/mm/page_alloc.c |
| 4 | * |
| 5 | * Manages the free list, the system allocates free pages here. |
| 6 | * Note that kmalloc() lives in slab.c |
| 7 | * |
| 8 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds |
| 9 | * Swap reorganised 29.12.95, Stephen Tweedie |
| 10 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 |
| 11 | * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 |
| 12 | * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 |
| 13 | * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 |
| 14 | * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 |
| 15 | * (lots of bits borrowed from Ingo Molnar & Andrew Morton) |
| 16 | */ |
| 17 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 18 | #include <linux/stddef.h> |
| 19 | #include <linux/mm.h> |
Arun KS | ca79b0c | 2018-12-28 00:34:29 -0800 | [diff] [blame] | 20 | #include <linux/highmem.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 21 | #include <linux/swap.h> |
| 22 | #include <linux/interrupt.h> |
| 23 | #include <linux/pagemap.h> |
KOSAKI Motohiro | 10ed273 | 2008-03-04 14:28:32 -0800 | [diff] [blame] | 24 | #include <linux/jiffies.h> |
Yinghai Lu | edbe7d2 | 2010-08-25 13:39:16 -0700 | [diff] [blame] | 25 | #include <linux/memblock.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 26 | #include <linux/compiler.h> |
Randy Dunlap | 9f15833 | 2005-09-13 01:25:16 -0700 | [diff] [blame] | 27 | #include <linux/kernel.h> |
Andrey Ryabinin | b8c73fc | 2015-02-13 14:39:28 -0800 | [diff] [blame] | 28 | #include <linux/kasan.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 29 | #include <linux/module.h> |
| 30 | #include <linux/suspend.h> |
| 31 | #include <linux/pagevec.h> |
| 32 | #include <linux/blkdev.h> |
| 33 | #include <linux/slab.h> |
Dave Hansen | a238ab5 | 2011-05-24 17:12:16 -0700 | [diff] [blame] | 34 | #include <linux/ratelimit.h> |
David Rientjes | 5a3135c2 | 2007-10-16 23:25:53 -0700 | [diff] [blame] | 35 | #include <linux/oom.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 36 | #include <linux/topology.h> |
| 37 | #include <linux/sysctl.h> |
| 38 | #include <linux/cpu.h> |
| 39 | #include <linux/cpuset.h> |
Dave Hansen | bdc8cb9 | 2005-10-29 18:16:53 -0700 | [diff] [blame] | 40 | #include <linux/memory_hotplug.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 41 | #include <linux/nodemask.h> |
| 42 | #include <linux/vmalloc.h> |
KOSAKI Motohiro | a6cccdc | 2011-05-24 17:11:33 -0700 | [diff] [blame] | 43 | #include <linux/vmstat.h> |
Christoph Lameter | 4be38e3 | 2006-01-06 00:11:17 -0800 | [diff] [blame] | 44 | #include <linux/mempolicy.h> |
Dan Williams | 4b94ffd | 2016-01-15 16:56:22 -0800 | [diff] [blame] | 45 | #include <linux/memremap.h> |
Yasunori Goto | 6811378 | 2006-06-23 02:03:11 -0700 | [diff] [blame] | 46 | #include <linux/stop_machine.h> |
Dan Williams | 97500a4 | 2019-05-14 15:41:35 -0700 | [diff] [blame] | 47 | #include <linux/random.h> |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 48 | #include <linux/sort.h> |
| 49 | #include <linux/pfn.h> |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 50 | #include <linux/backing-dev.h> |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 51 | #include <linux/fault-inject.h> |
KAMEZAWA Hiroyuki | a5d76b54 | 2007-10-16 01:26:11 -0700 | [diff] [blame] | 52 | #include <linux/page-isolation.h> |
Thomas Gleixner | 3ac7fe5 | 2008-04-30 00:55:01 -0700 | [diff] [blame] | 53 | #include <linux/debugobjects.h> |
Catalin Marinas | dbb1f81 | 2009-06-11 13:23:19 +0100 | [diff] [blame] | 54 | #include <linux/kmemleak.h> |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 55 | #include <linux/compaction.h> |
Mel Gorman | 0d3d062 | 2009-09-21 17:02:44 -0700 | [diff] [blame] | 56 | #include <trace/events/kmem.h> |
Michal Hocko | d379f01 | 2017-02-22 15:42:00 -0800 | [diff] [blame] | 57 | #include <trace/events/oom.h> |
Linus Torvalds | 268bb0c | 2011-05-20 12:50:29 -0700 | [diff] [blame] | 58 | #include <linux/prefetch.h> |
Lisa Du | 6e543d5 | 2013-09-11 14:22:36 -0700 | [diff] [blame] | 59 | #include <linux/mm_inline.h> |
Daniel Vetter | f920e41 | 2020-12-14 19:08:30 -0800 | [diff] [blame] | 60 | #include <linux/mmu_notifier.h> |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 61 | #include <linux/migrate.h> |
David Rientjes | 949f7ec | 2013-04-29 15:07:48 -0700 | [diff] [blame] | 62 | #include <linux/hugetlb.h> |
Clark Williams | 8bd75c7 | 2013-02-07 09:47:07 -0600 | [diff] [blame] | 63 | #include <linux/sched/rt.h> |
Ingo Molnar | 5b3cc15 | 2017-02-02 20:43:54 +0100 | [diff] [blame] | 64 | #include <linux/sched/mm.h> |
Joonsoo Kim | 48c96a3 | 2014-12-12 16:56:01 -0800 | [diff] [blame] | 65 | #include <linux/page_owner.h> |
Mel Gorman | 0e1cc95 | 2015-06-30 14:57:27 -0700 | [diff] [blame] | 66 | #include <linux/kthread.h> |
Vladimir Davydov | 4949148 | 2016-07-26 15:24:24 -0700 | [diff] [blame] | 67 | #include <linux/memcontrol.h> |
Steven Rostedt (VMware) | 42c269c | 2017-03-03 16:15:39 -0500 | [diff] [blame] | 68 | #include <linux/ftrace.h> |
Peter Zijlstra | d92a8cf | 2017-03-03 10:13:38 +0100 | [diff] [blame] | 69 | #include <linux/lockdep.h> |
Chen Yu | 556b969 | 2017-08-25 15:55:30 -0700 | [diff] [blame] | 70 | #include <linux/nmi.h> |
Johannes Weiner | eb41468 | 2018-10-26 15:06:27 -0700 | [diff] [blame] | 71 | #include <linux/psi.h> |
Daniel Jordan | e444314 | 2020-06-03 15:59:51 -0700 | [diff] [blame] | 72 | #include <linux/padata.h> |
Vijay Balakrishna | 4aab2be | 2020-10-10 23:16:40 -0700 | [diff] [blame] | 73 | #include <linux/khugepaged.h> |
Lin Feng | ba8f358 | 2020-12-14 19:11:19 -0800 | [diff] [blame] | 74 | #include <linux/buffer_head.h> |
Jiang Liu | 7ee3d4e | 2013-07-03 15:03:41 -0700 | [diff] [blame] | 75 | #include <asm/sections.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 76 | #include <asm/tlbflush.h> |
Andrew Morton | ac924c6 | 2006-05-15 09:43:59 -0700 | [diff] [blame] | 77 | #include <asm/div64.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 78 | #include "internal.h" |
Dan Williams | e900a91 | 2019-05-14 15:41:28 -0700 | [diff] [blame] | 79 | #include "shuffle.h" |
Alexander Duyck | 36e66c5 | 2020-04-06 20:04:56 -0700 | [diff] [blame] | 80 | #include "page_reporting.h" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 81 | |
David Hildenbrand | f04a5d5 | 2020-10-15 20:09:20 -0700 | [diff] [blame] | 82 | /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */ |
| 83 | typedef int __bitwise fpi_t; |
| 84 | |
| 85 | /* No special request */ |
| 86 | #define FPI_NONE ((__force fpi_t)0) |
| 87 | |
| 88 | /* |
| 89 | * Skip free page reporting notification for the (possibly merged) page. |
| 90 | * This does not hinder free page reporting from grabbing the page, |
| 91 | * reporting it and marking it "reported" - it only skips notifying |
| 92 | * the free page reporting infrastructure about a newly freed page. For |
| 93 | * example, used when temporarily pulling a page from a freelist and |
| 94 | * putting it back unmodified. |
| 95 | */ |
| 96 | #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0)) |
| 97 | |
David Hildenbrand | 47b6a24a2 | 2020-10-15 20:09:26 -0700 | [diff] [blame] | 98 | /* |
| 99 | * Place the (possibly merged) page to the tail of the freelist. Will ignore |
| 100 | * page shuffling (relevant code - e.g., memory onlining - is expected to |
| 101 | * shuffle the whole zone). |
| 102 | * |
| 103 | * Note: No code should rely on this flag for correctness - it's purely |
| 104 | * to allow for optimizations when handing back either fresh pages |
| 105 | * (memory onlining) or untouched pages (page isolation, free page |
| 106 | * reporting). |
| 107 | */ |
| 108 | #define FPI_TO_TAIL ((__force fpi_t)BIT(1)) |
| 109 | |
Andrey Konovalov | 2c33568 | 2021-04-29 22:59:52 -0700 | [diff] [blame] | 110 | /* |
| 111 | * Don't poison memory with KASAN (only for the tag-based modes). |
| 112 | * During boot, all non-reserved memblock memory is exposed to page_alloc. |
| 113 | * Poisoning all that memory lengthens boot time, especially on systems with |
| 114 | * large amount of RAM. This flag is used to skip that poisoning. |
| 115 | * This is only done for the tag-based KASAN modes, as those are able to |
| 116 | * detect memory corruptions with the memory tags assigned by default. |
| 117 | * All memory allocated normally after boot gets poisoned as usual. |
| 118 | */ |
| 119 | #define FPI_SKIP_KASAN_POISON ((__force fpi_t)BIT(2)) |
| 120 | |
Cody P Schafer | c8e251f | 2013-07-03 15:01:29 -0700 | [diff] [blame] | 121 | /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ |
| 122 | static DEFINE_MUTEX(pcp_batch_high_lock); |
Mel Gorman | 74f4482 | 2021-06-28 19:42:24 -0700 | [diff] [blame] | 123 | #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8) |
Cody P Schafer | c8e251f | 2013-07-03 15:01:29 -0700 | [diff] [blame] | 124 | |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 125 | struct pagesets { |
| 126 | local_lock_t lock; |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 127 | }; |
| 128 | static DEFINE_PER_CPU(struct pagesets, pagesets) = { |
| 129 | .lock = INIT_LOCAL_LOCK(lock), |
| 130 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 131 | |
Lee Schermerhorn | 7281201 | 2010-05-26 14:44:56 -0700 | [diff] [blame] | 132 | #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID |
| 133 | DEFINE_PER_CPU(int, numa_node); |
| 134 | EXPORT_PER_CPU_SYMBOL(numa_node); |
| 135 | #endif |
| 136 | |
Kemi Wang | 4518085 | 2017-11-15 17:38:22 -0800 | [diff] [blame] | 137 | DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key); |
| 138 | |
Lee Schermerhorn | 7aac789 | 2010-05-26 14:45:00 -0700 | [diff] [blame] | 139 | #ifdef CONFIG_HAVE_MEMORYLESS_NODES |
| 140 | /* |
| 141 | * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. |
| 142 | * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. |
| 143 | * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() |
| 144 | * defined in <linux/topology.h>. |
| 145 | */ |
| 146 | DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ |
| 147 | EXPORT_PER_CPU_SYMBOL(_numa_mem_); |
| 148 | #endif |
| 149 | |
Mel Gorman | bd233f5 | 2017-02-24 14:56:56 -0800 | [diff] [blame] | 150 | /* work_structs for global per-cpu drains */ |
Wei Yang | d9367bd | 2018-12-28 00:38:58 -0800 | [diff] [blame] | 151 | struct pcpu_drain { |
| 152 | struct zone *zone; |
| 153 | struct work_struct work; |
| 154 | }; |
Jason Yan | 8b885f5 | 2020-04-10 14:32:32 -0700 | [diff] [blame] | 155 | static DEFINE_MUTEX(pcpu_drain_mutex); |
| 156 | static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain); |
Mel Gorman | bd233f5 | 2017-02-24 14:56:56 -0800 | [diff] [blame] | 157 | |
Emese Revfy | 38addce | 2016-06-20 20:41:19 +0200 | [diff] [blame] | 158 | #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY |
Kees Cook | 58bea41 | 2016-10-19 00:08:04 +0200 | [diff] [blame] | 159 | volatile unsigned long latent_entropy __latent_entropy; |
Emese Revfy | 38addce | 2016-06-20 20:41:19 +0200 | [diff] [blame] | 160 | EXPORT_SYMBOL(latent_entropy); |
| 161 | #endif |
| 162 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 163 | /* |
Christoph Lameter | 1380891 | 2007-10-16 01:25:27 -0700 | [diff] [blame] | 164 | * Array of node states. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 165 | */ |
Christoph Lameter | 1380891 | 2007-10-16 01:25:27 -0700 | [diff] [blame] | 166 | nodemask_t node_states[NR_NODE_STATES] __read_mostly = { |
| 167 | [N_POSSIBLE] = NODE_MASK_ALL, |
| 168 | [N_ONLINE] = { { [0] = 1UL } }, |
| 169 | #ifndef CONFIG_NUMA |
| 170 | [N_NORMAL_MEMORY] = { { [0] = 1UL } }, |
| 171 | #ifdef CONFIG_HIGHMEM |
| 172 | [N_HIGH_MEMORY] = { { [0] = 1UL } }, |
| 173 | #endif |
Lai Jiangshan | 20b2f52 | 2012-12-12 13:52:00 -0800 | [diff] [blame] | 174 | [N_MEMORY] = { { [0] = 1UL } }, |
Christoph Lameter | 1380891 | 2007-10-16 01:25:27 -0700 | [diff] [blame] | 175 | [N_CPU] = { { [0] = 1UL } }, |
| 176 | #endif /* NUMA */ |
| 177 | }; |
| 178 | EXPORT_SYMBOL(node_states); |
| 179 | |
Arun KS | ca79b0c | 2018-12-28 00:34:29 -0800 | [diff] [blame] | 180 | atomic_long_t _totalram_pages __read_mostly; |
| 181 | EXPORT_SYMBOL(_totalram_pages); |
Hideo AOKI | cb45b0e | 2006-04-10 22:52:59 -0700 | [diff] [blame] | 182 | unsigned long totalreserve_pages __read_mostly; |
Pintu Kumar | e48322a | 2014-12-18 16:17:15 -0800 | [diff] [blame] | 183 | unsigned long totalcma_pages __read_mostly; |
Johannes Weiner | ab8fabd | 2012-01-10 15:07:42 -0800 | [diff] [blame] | 184 | |
Mel Gorman | 74f4482 | 2021-06-28 19:42:24 -0700 | [diff] [blame] | 185 | int percpu_pagelist_high_fraction; |
Benjamin Herrenschmidt | dcce284 | 2009-06-18 13:24:12 +1000 | [diff] [blame] | 186 | gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; |
Kees Cook | 51cba1e | 2021-04-01 16:23:43 -0700 | [diff] [blame] | 187 | DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); |
Alexander Potapenko | 6471384 | 2019-07-11 20:59:19 -0700 | [diff] [blame] | 188 | EXPORT_SYMBOL(init_on_alloc); |
| 189 | |
Kees Cook | 51cba1e | 2021-04-01 16:23:43 -0700 | [diff] [blame] | 190 | DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); |
Alexander Potapenko | 6471384 | 2019-07-11 20:59:19 -0700 | [diff] [blame] | 191 | EXPORT_SYMBOL(init_on_free); |
| 192 | |
Vlastimil Babka | 0401351 | 2020-12-14 19:13:30 -0800 | [diff] [blame] | 193 | static bool _init_on_alloc_enabled_early __read_mostly |
| 194 | = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON); |
Alexander Potapenko | 6471384 | 2019-07-11 20:59:19 -0700 | [diff] [blame] | 195 | static int __init early_init_on_alloc(char *buf) |
| 196 | { |
Alexander Potapenko | 6471384 | 2019-07-11 20:59:19 -0700 | [diff] [blame] | 197 | |
Vlastimil Babka | 0401351 | 2020-12-14 19:13:30 -0800 | [diff] [blame] | 198 | return kstrtobool(buf, &_init_on_alloc_enabled_early); |
Alexander Potapenko | 6471384 | 2019-07-11 20:59:19 -0700 | [diff] [blame] | 199 | } |
| 200 | early_param("init_on_alloc", early_init_on_alloc); |
| 201 | |
Vlastimil Babka | 0401351 | 2020-12-14 19:13:30 -0800 | [diff] [blame] | 202 | static bool _init_on_free_enabled_early __read_mostly |
| 203 | = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON); |
Alexander Potapenko | 6471384 | 2019-07-11 20:59:19 -0700 | [diff] [blame] | 204 | static int __init early_init_on_free(char *buf) |
| 205 | { |
Vlastimil Babka | 0401351 | 2020-12-14 19:13:30 -0800 | [diff] [blame] | 206 | return kstrtobool(buf, &_init_on_free_enabled_early); |
Alexander Potapenko | 6471384 | 2019-07-11 20:59:19 -0700 | [diff] [blame] | 207 | } |
| 208 | early_param("init_on_free", early_init_on_free); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 209 | |
Vlastimil Babka | bb14c2c | 2015-09-08 15:01:25 -0700 | [diff] [blame] | 210 | /* |
| 211 | * A cached value of the page's pageblock's migratetype, used when the page is |
| 212 | * put on a pcplist. Used to avoid the pageblock migratetype lookup when |
| 213 | * freeing from pcplists in most cases, at the cost of possibly becoming stale. |
| 214 | * Also the migratetype set in the page does not necessarily match the pcplist |
| 215 | * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any |
| 216 | * other index - this ensures that it will be put on the correct CMA freelist. |
| 217 | */ |
| 218 | static inline int get_pcppage_migratetype(struct page *page) |
| 219 | { |
| 220 | return page->index; |
| 221 | } |
| 222 | |
| 223 | static inline void set_pcppage_migratetype(struct page *page, int migratetype) |
| 224 | { |
| 225 | page->index = migratetype; |
| 226 | } |
| 227 | |
Rafael J. Wysocki | 452aa69 | 2010-03-05 13:42:13 -0800 | [diff] [blame] | 228 | #ifdef CONFIG_PM_SLEEP |
| 229 | /* |
| 230 | * The following functions are used by the suspend/hibernate code to temporarily |
| 231 | * change gfp_allowed_mask in order to avoid using I/O during memory allocations |
| 232 | * while devices are suspended. To avoid races with the suspend/hibernate code, |
Pingfan Liu | 55f2503 | 2018-07-31 16:51:32 +0800 | [diff] [blame] | 233 | * they should always be called with system_transition_mutex held |
| 234 | * (gfp_allowed_mask also should only be modified with system_transition_mutex |
| 235 | * held, unless the suspend/hibernate code is guaranteed not to run in parallel |
| 236 | * with that modification). |
Rafael J. Wysocki | 452aa69 | 2010-03-05 13:42:13 -0800 | [diff] [blame] | 237 | */ |
Rafael J. Wysocki | c9e664f | 2010-12-03 22:57:45 +0100 | [diff] [blame] | 238 | |
| 239 | static gfp_t saved_gfp_mask; |
| 240 | |
| 241 | void pm_restore_gfp_mask(void) |
Rafael J. Wysocki | 452aa69 | 2010-03-05 13:42:13 -0800 | [diff] [blame] | 242 | { |
Pingfan Liu | 55f2503 | 2018-07-31 16:51:32 +0800 | [diff] [blame] | 243 | WARN_ON(!mutex_is_locked(&system_transition_mutex)); |
Rafael J. Wysocki | c9e664f | 2010-12-03 22:57:45 +0100 | [diff] [blame] | 244 | if (saved_gfp_mask) { |
| 245 | gfp_allowed_mask = saved_gfp_mask; |
| 246 | saved_gfp_mask = 0; |
| 247 | } |
Rafael J. Wysocki | 452aa69 | 2010-03-05 13:42:13 -0800 | [diff] [blame] | 248 | } |
| 249 | |
Rafael J. Wysocki | c9e664f | 2010-12-03 22:57:45 +0100 | [diff] [blame] | 250 | void pm_restrict_gfp_mask(void) |
Rafael J. Wysocki | 452aa69 | 2010-03-05 13:42:13 -0800 | [diff] [blame] | 251 | { |
Pingfan Liu | 55f2503 | 2018-07-31 16:51:32 +0800 | [diff] [blame] | 252 | WARN_ON(!mutex_is_locked(&system_transition_mutex)); |
Rafael J. Wysocki | c9e664f | 2010-12-03 22:57:45 +0100 | [diff] [blame] | 253 | WARN_ON(saved_gfp_mask); |
| 254 | saved_gfp_mask = gfp_allowed_mask; |
Mel Gorman | d0164ad | 2015-11-06 16:28:21 -0800 | [diff] [blame] | 255 | gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); |
Rafael J. Wysocki | 452aa69 | 2010-03-05 13:42:13 -0800 | [diff] [blame] | 256 | } |
Mel Gorman | f90ac39 | 2012-01-10 15:07:15 -0800 | [diff] [blame] | 257 | |
| 258 | bool pm_suspended_storage(void) |
| 259 | { |
Mel Gorman | d0164ad | 2015-11-06 16:28:21 -0800 | [diff] [blame] | 260 | if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) |
Mel Gorman | f90ac39 | 2012-01-10 15:07:15 -0800 | [diff] [blame] | 261 | return false; |
| 262 | return true; |
| 263 | } |
Rafael J. Wysocki | 452aa69 | 2010-03-05 13:42:13 -0800 | [diff] [blame] | 264 | #endif /* CONFIG_PM_SLEEP */ |
| 265 | |
Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 266 | #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE |
Kirill A. Shutemov | d00181b | 2015-11-06 16:29:57 -0800 | [diff] [blame] | 267 | unsigned int pageblock_order __read_mostly; |
Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 268 | #endif |
| 269 | |
David Hildenbrand | 7fef431 | 2020-10-15 20:09:35 -0700 | [diff] [blame] | 270 | static void __free_pages_ok(struct page *page, unsigned int order, |
| 271 | fpi_t fpi_flags); |
David Howells | a226f6c | 2006-01-06 00:11:08 -0800 | [diff] [blame] | 272 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 273 | /* |
| 274 | * results with 256, 32 in the lowmem_reserve sysctl: |
| 275 | * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) |
| 276 | * 1G machine -> (16M dma, 784M normal, 224M high) |
| 277 | * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA |
| 278 | * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL |
Yaowei Bai | 84109e1 | 2015-02-12 15:00:22 -0800 | [diff] [blame] | 279 | * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA |
Andi Kleen | a2f1b42 | 2005-11-05 17:25:53 +0100 | [diff] [blame] | 280 | * |
| 281 | * TBD: should special case ZONE_DMA32 machines here - in those we normally |
| 282 | * don't need any ZONE_NORMAL reservation |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 283 | */ |
Joonsoo Kim | d3cda23 | 2018-04-10 16:30:11 -0700 | [diff] [blame] | 284 | int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { |
Christoph Lameter | 4b51d66 | 2007-02-10 01:43:10 -0800 | [diff] [blame] | 285 | #ifdef CONFIG_ZONE_DMA |
Joonsoo Kim | d3cda23 | 2018-04-10 16:30:11 -0700 | [diff] [blame] | 286 | [ZONE_DMA] = 256, |
Christoph Lameter | 4b51d66 | 2007-02-10 01:43:10 -0800 | [diff] [blame] | 287 | #endif |
Christoph Lameter | fb0e794 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 288 | #ifdef CONFIG_ZONE_DMA32 |
Joonsoo Kim | d3cda23 | 2018-04-10 16:30:11 -0700 | [diff] [blame] | 289 | [ZONE_DMA32] = 256, |
Christoph Lameter | fb0e794 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 290 | #endif |
Joonsoo Kim | d3cda23 | 2018-04-10 16:30:11 -0700 | [diff] [blame] | 291 | [ZONE_NORMAL] = 32, |
Christoph Lameter | e53ef38 | 2006-09-25 23:31:14 -0700 | [diff] [blame] | 292 | #ifdef CONFIG_HIGHMEM |
Joonsoo Kim | d3cda23 | 2018-04-10 16:30:11 -0700 | [diff] [blame] | 293 | [ZONE_HIGHMEM] = 0, |
Christoph Lameter | e53ef38 | 2006-09-25 23:31:14 -0700 | [diff] [blame] | 294 | #endif |
Joonsoo Kim | d3cda23 | 2018-04-10 16:30:11 -0700 | [diff] [blame] | 295 | [ZONE_MOVABLE] = 0, |
Christoph Lameter | 2f1b624 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 296 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 297 | |
Helge Deller | 15ad7cd | 2006-12-06 20:40:36 -0800 | [diff] [blame] | 298 | static char * const zone_names[MAX_NR_ZONES] = { |
Christoph Lameter | 4b51d66 | 2007-02-10 01:43:10 -0800 | [diff] [blame] | 299 | #ifdef CONFIG_ZONE_DMA |
Christoph Lameter | 2f1b624 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 300 | "DMA", |
Christoph Lameter | 4b51d66 | 2007-02-10 01:43:10 -0800 | [diff] [blame] | 301 | #endif |
Christoph Lameter | fb0e794 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 302 | #ifdef CONFIG_ZONE_DMA32 |
Christoph Lameter | 2f1b624 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 303 | "DMA32", |
Christoph Lameter | fb0e794 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 304 | #endif |
Christoph Lameter | 2f1b624 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 305 | "Normal", |
Christoph Lameter | e53ef38 | 2006-09-25 23:31:14 -0700 | [diff] [blame] | 306 | #ifdef CONFIG_HIGHMEM |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 307 | "HighMem", |
Christoph Lameter | e53ef38 | 2006-09-25 23:31:14 -0700 | [diff] [blame] | 308 | #endif |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 309 | "Movable", |
Dan Williams | 033fbae | 2015-08-09 15:29:06 -0400 | [diff] [blame] | 310 | #ifdef CONFIG_ZONE_DEVICE |
| 311 | "Device", |
| 312 | #endif |
Christoph Lameter | 2f1b624 | 2006-09-25 23:31:13 -0700 | [diff] [blame] | 313 | }; |
| 314 | |
Alexey Dobriyan | c999fbd | 2018-12-28 00:35:55 -0800 | [diff] [blame] | 315 | const char * const migratetype_names[MIGRATE_TYPES] = { |
Vlastimil Babka | 60f3035 | 2016-03-15 14:56:08 -0700 | [diff] [blame] | 316 | "Unmovable", |
| 317 | "Movable", |
| 318 | "Reclaimable", |
| 319 | "HighAtomic", |
| 320 | #ifdef CONFIG_CMA |
| 321 | "CMA", |
| 322 | #endif |
| 323 | #ifdef CONFIG_MEMORY_ISOLATION |
| 324 | "Isolate", |
| 325 | #endif |
| 326 | }; |
| 327 | |
Anshuman Khandual | ae70edd | 2020-06-03 15:59:17 -0700 | [diff] [blame] | 328 | compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = { |
| 329 | [NULL_COMPOUND_DTOR] = NULL, |
| 330 | [COMPOUND_PAGE_DTOR] = free_compound_page, |
Kirill A. Shutemov | f1e6155 | 2015-11-06 16:29:50 -0800 | [diff] [blame] | 331 | #ifdef CONFIG_HUGETLB_PAGE |
Anshuman Khandual | ae70edd | 2020-06-03 15:59:17 -0700 | [diff] [blame] | 332 | [HUGETLB_PAGE_DTOR] = free_huge_page, |
Kirill A. Shutemov | f1e6155 | 2015-11-06 16:29:50 -0800 | [diff] [blame] | 333 | #endif |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 334 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
Anshuman Khandual | ae70edd | 2020-06-03 15:59:17 -0700 | [diff] [blame] | 335 | [TRANSHUGE_PAGE_DTOR] = free_transhuge_page, |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 336 | #endif |
Kirill A. Shutemov | f1e6155 | 2015-11-06 16:29:50 -0800 | [diff] [blame] | 337 | }; |
| 338 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 339 | int min_free_kbytes = 1024; |
Han Pingtian | 42aa83c | 2014-01-23 15:53:28 -0800 | [diff] [blame] | 340 | int user_min_free_kbytes = -1; |
Mel Gorman | 1c30844 | 2018-12-28 00:35:52 -0800 | [diff] [blame] | 341 | int watermark_boost_factor __read_mostly = 15000; |
Johannes Weiner | 795ae7a | 2016-03-17 14:19:14 -0700 | [diff] [blame] | 342 | int watermark_scale_factor = 10; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 343 | |
Oscar Salvador | bbe5d99 | 2018-12-28 00:37:24 -0800 | [diff] [blame] | 344 | static unsigned long nr_kernel_pages __initdata; |
| 345 | static unsigned long nr_all_pages __initdata; |
| 346 | static unsigned long dma_reserve __initdata; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 347 | |
Oscar Salvador | bbe5d99 | 2018-12-28 00:37:24 -0800 | [diff] [blame] | 348 | static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata; |
| 349 | static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata; |
David Rientjes | 7f16f91 | 2018-04-05 16:23:12 -0700 | [diff] [blame] | 350 | static unsigned long required_kernelcore __initdata; |
David Rientjes | a5c6d65 | 2018-04-05 16:23:09 -0700 | [diff] [blame] | 351 | static unsigned long required_kernelcore_percent __initdata; |
David Rientjes | 7f16f91 | 2018-04-05 16:23:12 -0700 | [diff] [blame] | 352 | static unsigned long required_movablecore __initdata; |
David Rientjes | a5c6d65 | 2018-04-05 16:23:09 -0700 | [diff] [blame] | 353 | static unsigned long required_movablecore_percent __initdata; |
Oscar Salvador | bbe5d99 | 2018-12-28 00:37:24 -0800 | [diff] [blame] | 354 | static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata; |
David Rientjes | 7f16f91 | 2018-04-05 16:23:12 -0700 | [diff] [blame] | 355 | static bool mirrored_kernelcore __meminitdata; |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 356 | |
Tejun Heo | 0ee332c | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 357 | /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ |
| 358 | int movable_zone; |
| 359 | EXPORT_SYMBOL(movable_zone); |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 360 | |
Miklos Szeredi | 418508c | 2007-05-23 13:57:55 -0700 | [diff] [blame] | 361 | #if MAX_NUMNODES > 1 |
Alexey Dobriyan | b9726c2 | 2019-03-05 15:48:26 -0800 | [diff] [blame] | 362 | unsigned int nr_node_ids __read_mostly = MAX_NUMNODES; |
Alexey Dobriyan | ce0725f | 2019-03-05 15:48:29 -0800 | [diff] [blame] | 363 | unsigned int nr_online_nodes __read_mostly = 1; |
Miklos Szeredi | 418508c | 2007-05-23 13:57:55 -0700 | [diff] [blame] | 364 | EXPORT_SYMBOL(nr_node_ids); |
Christoph Lameter | 62bc62a | 2009-06-16 15:32:15 -0700 | [diff] [blame] | 365 | EXPORT_SYMBOL(nr_online_nodes); |
Miklos Szeredi | 418508c | 2007-05-23 13:57:55 -0700 | [diff] [blame] | 366 | #endif |
| 367 | |
Mel Gorman | 9ef9acb | 2007-10-16 01:25:54 -0700 | [diff] [blame] | 368 | int page_group_by_mobility_disabled __read_mostly; |
| 369 | |
Mel Gorman | 3a80a7f | 2015-06-30 14:57:02 -0700 | [diff] [blame] | 370 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
Waiman Long | 3c0c12c | 2018-12-28 00:38:51 -0800 | [diff] [blame] | 371 | /* |
| 372 | * During boot we initialize deferred pages on-demand, as needed, but once |
| 373 | * page_alloc_init_late() has finished, the deferred pages are all initialized, |
| 374 | * and we can permanently disable that path. |
| 375 | */ |
| 376 | static DEFINE_STATIC_KEY_TRUE(deferred_pages); |
| 377 | |
| 378 | /* |
Peter Collingbourne | 7a3b835 | 2021-06-02 16:52:28 -0700 | [diff] [blame] | 379 | * Calling kasan_poison_pages() only after deferred memory initialization |
Waiman Long | 3c0c12c | 2018-12-28 00:38:51 -0800 | [diff] [blame] | 380 | * has completed. Poisoning pages during deferred memory init will greatly |
| 381 | * lengthen the process and cause problem in large memory systems as the |
| 382 | * deferred pages initialization is done with interrupt disabled. |
| 383 | * |
| 384 | * Assuming that there will be no reference to those newly initialized |
| 385 | * pages before they are ever allocated, this should have no effect on |
| 386 | * KASAN memory tracking as the poison will be properly inserted at page |
| 387 | * allocation time. The only corner case is when pages are allocated by |
| 388 | * on-demand allocation and then freed again before the deferred pages |
| 389 | * initialization is done, but this is not likely to happen. |
| 390 | */ |
Peter Collingbourne | c275c5c | 2021-06-02 16:52:30 -0700 | [diff] [blame] | 391 | static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) |
Waiman Long | 3c0c12c | 2018-12-28 00:38:51 -0800 | [diff] [blame] | 392 | { |
Peter Collingbourne | 7a3b835 | 2021-06-02 16:52:28 -0700 | [diff] [blame] | 393 | return static_branch_unlikely(&deferred_pages) || |
| 394 | (!IS_ENABLED(CONFIG_KASAN_GENERIC) && |
Peter Collingbourne | c275c5c | 2021-06-02 16:52:30 -0700 | [diff] [blame] | 395 | (fpi_flags & FPI_SKIP_KASAN_POISON)) || |
| 396 | PageSkipKASanPoison(page); |
Waiman Long | 3c0c12c | 2018-12-28 00:38:51 -0800 | [diff] [blame] | 397 | } |
| 398 | |
Mel Gorman | 3a80a7f | 2015-06-30 14:57:02 -0700 | [diff] [blame] | 399 | /* Returns true if the struct page for the pfn is uninitialised */ |
Mel Gorman | 0e1cc95 | 2015-06-30 14:57:27 -0700 | [diff] [blame] | 400 | static inline bool __meminit early_page_uninitialised(unsigned long pfn) |
Mel Gorman | 3a80a7f | 2015-06-30 14:57:02 -0700 | [diff] [blame] | 401 | { |
Mel Gorman | ef70b6f | 2016-07-14 12:07:23 -0700 | [diff] [blame] | 402 | int nid = early_pfn_to_nid(pfn); |
| 403 | |
| 404 | if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) |
Mel Gorman | 3a80a7f | 2015-06-30 14:57:02 -0700 | [diff] [blame] | 405 | return true; |
| 406 | |
| 407 | return false; |
| 408 | } |
| 409 | |
| 410 | /* |
Pavel Tatashin | d3035be | 2018-10-26 15:09:37 -0700 | [diff] [blame] | 411 | * Returns true when the remaining initialisation should be deferred until |
Mel Gorman | 3a80a7f | 2015-06-30 14:57:02 -0700 | [diff] [blame] | 412 | * later in the boot cycle when it can be parallelised. |
| 413 | */ |
Pavel Tatashin | d3035be | 2018-10-26 15:09:37 -0700 | [diff] [blame] | 414 | static bool __meminit |
| 415 | defer_init(int nid, unsigned long pfn, unsigned long end_pfn) |
Mel Gorman | 3a80a7f | 2015-06-30 14:57:02 -0700 | [diff] [blame] | 416 | { |
Pavel Tatashin | d3035be | 2018-10-26 15:09:37 -0700 | [diff] [blame] | 417 | static unsigned long prev_end_pfn, nr_initialised; |
| 418 | |
| 419 | /* |
| 420 | * prev_end_pfn static that contains the end of previous zone |
| 421 | * No need to protect because called very early in boot before smp_init. |
| 422 | */ |
| 423 | if (prev_end_pfn != end_pfn) { |
| 424 | prev_end_pfn = end_pfn; |
| 425 | nr_initialised = 0; |
Mel Gorman | 3a80a7f | 2015-06-30 14:57:02 -0700 | [diff] [blame] | 426 | } |
| 427 | |
Pavel Tatashin | d3035be | 2018-10-26 15:09:37 -0700 | [diff] [blame] | 428 | /* Always populate low zones for address-constrained allocations */ |
| 429 | if (end_pfn < pgdat_end_pfn(NODE_DATA(nid))) |
| 430 | return false; |
Wei Yang | 23b68cf | 2018-12-28 00:36:18 -0800 | [diff] [blame] | 431 | |
Baoquan He | dc2da7b | 2020-12-29 15:14:37 -0800 | [diff] [blame] | 432 | if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX) |
| 433 | return true; |
Wei Yang | 23b68cf | 2018-12-28 00:36:18 -0800 | [diff] [blame] | 434 | /* |
| 435 | * We start only with one section of pages, more pages are added as |
| 436 | * needed until the rest of deferred pages are initialized. |
| 437 | */ |
Pavel Tatashin | d3035be | 2018-10-26 15:09:37 -0700 | [diff] [blame] | 438 | nr_initialised++; |
Wei Yang | 23b68cf | 2018-12-28 00:36:18 -0800 | [diff] [blame] | 439 | if ((nr_initialised > PAGES_PER_SECTION) && |
Pavel Tatashin | d3035be | 2018-10-26 15:09:37 -0700 | [diff] [blame] | 440 | (pfn & (PAGES_PER_SECTION - 1)) == 0) { |
| 441 | NODE_DATA(nid)->first_deferred_pfn = pfn; |
| 442 | return true; |
| 443 | } |
| 444 | return false; |
Mel Gorman | 3a80a7f | 2015-06-30 14:57:02 -0700 | [diff] [blame] | 445 | } |
| 446 | #else |
Peter Collingbourne | c275c5c | 2021-06-02 16:52:30 -0700 | [diff] [blame] | 447 | static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) |
Andrey Konovalov | 2c33568 | 2021-04-29 22:59:52 -0700 | [diff] [blame] | 448 | { |
Peter Collingbourne | 7a3b835 | 2021-06-02 16:52:28 -0700 | [diff] [blame] | 449 | return (!IS_ENABLED(CONFIG_KASAN_GENERIC) && |
Peter Collingbourne | c275c5c | 2021-06-02 16:52:30 -0700 | [diff] [blame] | 450 | (fpi_flags & FPI_SKIP_KASAN_POISON)) || |
| 451 | PageSkipKASanPoison(page); |
Andrey Konovalov | 2c33568 | 2021-04-29 22:59:52 -0700 | [diff] [blame] | 452 | } |
Waiman Long | 3c0c12c | 2018-12-28 00:38:51 -0800 | [diff] [blame] | 453 | |
Mel Gorman | 3a80a7f | 2015-06-30 14:57:02 -0700 | [diff] [blame] | 454 | static inline bool early_page_uninitialised(unsigned long pfn) |
| 455 | { |
| 456 | return false; |
| 457 | } |
| 458 | |
Pavel Tatashin | d3035be | 2018-10-26 15:09:37 -0700 | [diff] [blame] | 459 | static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) |
Mel Gorman | 3a80a7f | 2015-06-30 14:57:02 -0700 | [diff] [blame] | 460 | { |
Pavel Tatashin | d3035be | 2018-10-26 15:09:37 -0700 | [diff] [blame] | 461 | return false; |
Mel Gorman | 3a80a7f | 2015-06-30 14:57:02 -0700 | [diff] [blame] | 462 | } |
| 463 | #endif |
| 464 | |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 465 | /* Return a pointer to the bitmap storing bits affecting a block of pages */ |
Matthew Wilcox (Oracle) | ca891f4 | 2021-06-28 19:41:22 -0700 | [diff] [blame] | 466 | static inline unsigned long *get_pageblock_bitmap(const struct page *page, |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 467 | unsigned long pfn) |
| 468 | { |
| 469 | #ifdef CONFIG_SPARSEMEM |
Dan Williams | f1eca35 | 2019-07-18 15:57:57 -0700 | [diff] [blame] | 470 | return section_to_usemap(__pfn_to_section(pfn)); |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 471 | #else |
| 472 | return page_zone(page)->pageblock_flags; |
| 473 | #endif /* CONFIG_SPARSEMEM */ |
| 474 | } |
| 475 | |
Matthew Wilcox (Oracle) | ca891f4 | 2021-06-28 19:41:22 -0700 | [diff] [blame] | 476 | static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn) |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 477 | { |
| 478 | #ifdef CONFIG_SPARSEMEM |
| 479 | pfn &= (PAGES_PER_SECTION-1); |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 480 | #else |
| 481 | pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages); |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 482 | #endif /* CONFIG_SPARSEMEM */ |
Wei Yang | 399b795 | 2020-08-06 23:25:44 -0700 | [diff] [blame] | 483 | return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 484 | } |
| 485 | |
Wei Yang | 535b81e | 2020-08-06 23:25:51 -0700 | [diff] [blame] | 486 | static __always_inline |
Matthew Wilcox (Oracle) | ca891f4 | 2021-06-28 19:41:22 -0700 | [diff] [blame] | 487 | unsigned long __get_pfnblock_flags_mask(const struct page *page, |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 488 | unsigned long pfn, |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 489 | unsigned long mask) |
| 490 | { |
| 491 | unsigned long *bitmap; |
| 492 | unsigned long bitidx, word_bitidx; |
| 493 | unsigned long word; |
| 494 | |
| 495 | bitmap = get_pageblock_bitmap(page, pfn); |
| 496 | bitidx = pfn_to_bitidx(page, pfn); |
| 497 | word_bitidx = bitidx / BITS_PER_LONG; |
| 498 | bitidx &= (BITS_PER_LONG-1); |
| 499 | |
| 500 | word = bitmap[word_bitidx]; |
Wei Yang | d93d5ab | 2020-08-06 23:25:48 -0700 | [diff] [blame] | 501 | return (word >> bitidx) & mask; |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 502 | } |
| 503 | |
Mauro Carvalho Chehab | a00cda3 | 2020-12-14 19:14:39 -0800 | [diff] [blame] | 504 | /** |
| 505 | * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages |
| 506 | * @page: The page within the block of interest |
| 507 | * @pfn: The target page frame number |
| 508 | * @mask: mask of bits that the caller is interested in |
| 509 | * |
| 510 | * Return: pageblock_bits flags |
| 511 | */ |
Matthew Wilcox (Oracle) | ca891f4 | 2021-06-28 19:41:22 -0700 | [diff] [blame] | 512 | unsigned long get_pfnblock_flags_mask(const struct page *page, |
| 513 | unsigned long pfn, unsigned long mask) |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 514 | { |
Wei Yang | 535b81e | 2020-08-06 23:25:51 -0700 | [diff] [blame] | 515 | return __get_pfnblock_flags_mask(page, pfn, mask); |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 516 | } |
| 517 | |
Matthew Wilcox (Oracle) | ca891f4 | 2021-06-28 19:41:22 -0700 | [diff] [blame] | 518 | static __always_inline int get_pfnblock_migratetype(const struct page *page, |
| 519 | unsigned long pfn) |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 520 | { |
Wei Yang | 535b81e | 2020-08-06 23:25:51 -0700 | [diff] [blame] | 521 | return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK); |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 522 | } |
| 523 | |
| 524 | /** |
| 525 | * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages |
| 526 | * @page: The page within the block of interest |
| 527 | * @flags: The flags to set |
| 528 | * @pfn: The target page frame number |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 529 | * @mask: mask of bits that the caller is interested in |
| 530 | */ |
| 531 | void set_pfnblock_flags_mask(struct page *page, unsigned long flags, |
| 532 | unsigned long pfn, |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 533 | unsigned long mask) |
| 534 | { |
| 535 | unsigned long *bitmap; |
| 536 | unsigned long bitidx, word_bitidx; |
| 537 | unsigned long old_word, word; |
| 538 | |
| 539 | BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); |
Pingfan Liu | 125b860 | 2018-12-28 00:38:43 -0800 | [diff] [blame] | 540 | BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits)); |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 541 | |
| 542 | bitmap = get_pageblock_bitmap(page, pfn); |
| 543 | bitidx = pfn_to_bitidx(page, pfn); |
| 544 | word_bitidx = bitidx / BITS_PER_LONG; |
| 545 | bitidx &= (BITS_PER_LONG-1); |
| 546 | |
| 547 | VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); |
| 548 | |
Wei Yang | d93d5ab | 2020-08-06 23:25:48 -0700 | [diff] [blame] | 549 | mask <<= bitidx; |
| 550 | flags <<= bitidx; |
Mel Gorman | 0b423ca | 2016-05-19 17:14:27 -0700 | [diff] [blame] | 551 | |
| 552 | word = READ_ONCE(bitmap[word_bitidx]); |
| 553 | for (;;) { |
| 554 | old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags); |
| 555 | if (word == old_word) |
| 556 | break; |
| 557 | word = old_word; |
| 558 | } |
| 559 | } |
Mel Gorman | 3a80a7f | 2015-06-30 14:57:02 -0700 | [diff] [blame] | 560 | |
Minchan Kim | ee6f509 | 2012-07-31 16:43:50 -0700 | [diff] [blame] | 561 | void set_pageblock_migratetype(struct page *page, int migratetype) |
Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 562 | { |
KOSAKI Motohiro | 5d0f3f7 | 2013-11-12 15:08:18 -0800 | [diff] [blame] | 563 | if (unlikely(page_group_by_mobility_disabled && |
| 564 | migratetype < MIGRATE_PCPTYPES)) |
Mel Gorman | 49255c6 | 2009-06-16 15:31:58 -0700 | [diff] [blame] | 565 | migratetype = MIGRATE_UNMOVABLE; |
| 566 | |
Wei Yang | d93d5ab | 2020-08-06 23:25:48 -0700 | [diff] [blame] | 567 | set_pfnblock_flags_mask(page, (unsigned long)migratetype, |
Wei Yang | 535b81e | 2020-08-06 23:25:51 -0700 | [diff] [blame] | 568 | page_to_pfn(page), MIGRATETYPE_MASK); |
Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 569 | } |
| 570 | |
Nick Piggin | 13e7444 | 2006-01-06 00:10:58 -0800 | [diff] [blame] | 571 | #ifdef CONFIG_DEBUG_VM |
Dave Hansen | c6a57e1 | 2005-10-29 18:16:52 -0700 | [diff] [blame] | 572 | static int page_outside_zone_boundaries(struct zone *zone, struct page *page) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 573 | { |
Dave Hansen | bdc8cb9 | 2005-10-29 18:16:53 -0700 | [diff] [blame] | 574 | int ret = 0; |
| 575 | unsigned seq; |
| 576 | unsigned long pfn = page_to_pfn(page); |
Cody P Schafer | b5e6a5a | 2013-02-22 16:35:28 -0800 | [diff] [blame] | 577 | unsigned long sp, start_pfn; |
Dave Hansen | c6a57e1 | 2005-10-29 18:16:52 -0700 | [diff] [blame] | 578 | |
Dave Hansen | bdc8cb9 | 2005-10-29 18:16:53 -0700 | [diff] [blame] | 579 | do { |
| 580 | seq = zone_span_seqbegin(zone); |
Cody P Schafer | b5e6a5a | 2013-02-22 16:35:28 -0800 | [diff] [blame] | 581 | start_pfn = zone->zone_start_pfn; |
| 582 | sp = zone->spanned_pages; |
Cody P Schafer | 108bcc9 | 2013-02-22 16:35:23 -0800 | [diff] [blame] | 583 | if (!zone_spans_pfn(zone, pfn)) |
Dave Hansen | bdc8cb9 | 2005-10-29 18:16:53 -0700 | [diff] [blame] | 584 | ret = 1; |
| 585 | } while (zone_span_seqretry(zone, seq)); |
| 586 | |
Cody P Schafer | b5e6a5a | 2013-02-22 16:35:28 -0800 | [diff] [blame] | 587 | if (ret) |
Dave Hansen | 613813e | 2014-06-04 16:07:27 -0700 | [diff] [blame] | 588 | pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", |
| 589 | pfn, zone_to_nid(zone), zone->name, |
| 590 | start_pfn, start_pfn + sp); |
Cody P Schafer | b5e6a5a | 2013-02-22 16:35:28 -0800 | [diff] [blame] | 591 | |
Dave Hansen | bdc8cb9 | 2005-10-29 18:16:53 -0700 | [diff] [blame] | 592 | return ret; |
Dave Hansen | c6a57e1 | 2005-10-29 18:16:52 -0700 | [diff] [blame] | 593 | } |
| 594 | |
| 595 | static int page_is_consistent(struct zone *zone, struct page *page) |
| 596 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 597 | if (zone != page_zone(page)) |
Dave Hansen | c6a57e1 | 2005-10-29 18:16:52 -0700 | [diff] [blame] | 598 | return 0; |
| 599 | |
| 600 | return 1; |
| 601 | } |
| 602 | /* |
| 603 | * Temporary debugging check for pages not lying within a given zone. |
| 604 | */ |
Matthias Kaehlcke | d73d3c9f | 2017-07-06 15:39:23 -0700 | [diff] [blame] | 605 | static int __maybe_unused bad_range(struct zone *zone, struct page *page) |
Dave Hansen | c6a57e1 | 2005-10-29 18:16:52 -0700 | [diff] [blame] | 606 | { |
| 607 | if (page_outside_zone_boundaries(zone, page)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 608 | return 1; |
Dave Hansen | c6a57e1 | 2005-10-29 18:16:52 -0700 | [diff] [blame] | 609 | if (!page_is_consistent(zone, page)) |
| 610 | return 1; |
| 611 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 612 | return 0; |
| 613 | } |
Nick Piggin | 13e7444 | 2006-01-06 00:10:58 -0800 | [diff] [blame] | 614 | #else |
Matthias Kaehlcke | d73d3c9f | 2017-07-06 15:39:23 -0700 | [diff] [blame] | 615 | static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) |
Nick Piggin | 13e7444 | 2006-01-06 00:10:58 -0800 | [diff] [blame] | 616 | { |
| 617 | return 0; |
| 618 | } |
| 619 | #endif |
| 620 | |
Wei Yang | 82a3241 | 2020-06-03 15:58:29 -0700 | [diff] [blame] | 621 | static void bad_page(struct page *page, const char *reason) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 622 | { |
Hugh Dickins | d936cf9 | 2009-01-06 14:40:12 -0800 | [diff] [blame] | 623 | static unsigned long resume; |
| 624 | static unsigned long nr_shown; |
| 625 | static unsigned long nr_unshown; |
| 626 | |
| 627 | /* |
| 628 | * Allow a burst of 60 reports, then keep quiet for that minute; |
| 629 | * or allow a steady drip of one report per second. |
| 630 | */ |
| 631 | if (nr_shown == 60) { |
| 632 | if (time_before(jiffies, resume)) { |
| 633 | nr_unshown++; |
| 634 | goto out; |
| 635 | } |
| 636 | if (nr_unshown) { |
Vlastimil Babka | ff8e811 | 2016-03-15 14:56:24 -0700 | [diff] [blame] | 637 | pr_alert( |
Hugh Dickins | 1e9e636 | 2009-01-06 14:40:13 -0800 | [diff] [blame] | 638 | "BUG: Bad page state: %lu messages suppressed\n", |
Hugh Dickins | d936cf9 | 2009-01-06 14:40:12 -0800 | [diff] [blame] | 639 | nr_unshown); |
| 640 | nr_unshown = 0; |
| 641 | } |
| 642 | nr_shown = 0; |
| 643 | } |
| 644 | if (nr_shown++ == 0) |
| 645 | resume = jiffies + 60 * HZ; |
| 646 | |
Vlastimil Babka | ff8e811 | 2016-03-15 14:56:24 -0700 | [diff] [blame] | 647 | pr_alert("BUG: Bad page state in process %s pfn:%05lx\n", |
Hugh Dickins | 3dc1474 | 2009-01-06 14:40:08 -0800 | [diff] [blame] | 648 | current->comm, page_to_pfn(page)); |
Matthew Wilcox (Oracle) | d2f07ec | 2021-06-28 19:41:07 -0700 | [diff] [blame] | 649 | dump_page(page, reason); |
KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 650 | |
Dave Jones | 4f31888 | 2011-10-31 17:07:24 -0700 | [diff] [blame] | 651 | print_modules(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 652 | dump_stack(); |
Hugh Dickins | d936cf9 | 2009-01-06 14:40:12 -0800 | [diff] [blame] | 653 | out: |
Hugh Dickins | 8cc3b39 | 2009-01-06 14:40:06 -0800 | [diff] [blame] | 654 | /* Leave bad fields for debug, except PageBuddy could make trouble */ |
Mel Gorman | 22b751c | 2013-02-22 16:34:59 -0800 | [diff] [blame] | 655 | page_mapcount_reset(page); /* remove PageBuddy */ |
Rusty Russell | 373d4d0 | 2013-01-21 17:17:39 +1030 | [diff] [blame] | 656 | add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 657 | } |
| 658 | |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 659 | static inline unsigned int order_to_pindex(int migratetype, int order) |
| 660 | { |
| 661 | int base = order; |
| 662 | |
| 663 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 664 | if (order > PAGE_ALLOC_COSTLY_ORDER) { |
| 665 | VM_BUG_ON(order != pageblock_order); |
| 666 | base = PAGE_ALLOC_COSTLY_ORDER + 1; |
| 667 | } |
| 668 | #else |
| 669 | VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); |
| 670 | #endif |
| 671 | |
| 672 | return (MIGRATE_PCPTYPES * base) + migratetype; |
| 673 | } |
| 674 | |
| 675 | static inline int pindex_to_order(unsigned int pindex) |
| 676 | { |
| 677 | int order = pindex / MIGRATE_PCPTYPES; |
| 678 | |
| 679 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
Miaohe Lin | ea808b4 | 2021-11-05 13:40:02 -0700 | [diff] [blame] | 680 | if (order > PAGE_ALLOC_COSTLY_ORDER) |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 681 | order = pageblock_order; |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 682 | #else |
| 683 | VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); |
| 684 | #endif |
| 685 | |
| 686 | return order; |
| 687 | } |
| 688 | |
| 689 | static inline bool pcp_allowed_order(unsigned int order) |
| 690 | { |
| 691 | if (order <= PAGE_ALLOC_COSTLY_ORDER) |
| 692 | return true; |
| 693 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 694 | if (order == pageblock_order) |
| 695 | return true; |
| 696 | #endif |
| 697 | return false; |
| 698 | } |
| 699 | |
Mel Gorman | 21d02f8 | 2021-06-28 19:42:36 -0700 | [diff] [blame] | 700 | static inline void free_the_page(struct page *page, unsigned int order) |
| 701 | { |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 702 | if (pcp_allowed_order(order)) /* Via pcp? */ |
| 703 | free_unref_page(page, order); |
Mel Gorman | 21d02f8 | 2021-06-28 19:42:36 -0700 | [diff] [blame] | 704 | else |
| 705 | __free_pages_ok(page, order, FPI_NONE); |
| 706 | } |
| 707 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 708 | /* |
| 709 | * Higher-order pages are called "compound pages". They are structured thusly: |
| 710 | * |
Kirill A. Shutemov | 1d798ca | 2015-11-06 16:29:54 -0800 | [diff] [blame] | 711 | * The first PAGE_SIZE page is called the "head page" and have PG_head set. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 712 | * |
Kirill A. Shutemov | 1d798ca | 2015-11-06 16:29:54 -0800 | [diff] [blame] | 713 | * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded |
| 714 | * in bit 0 of page->compound_head. The rest of bits is pointer to head page. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 715 | * |
Kirill A. Shutemov | 1d798ca | 2015-11-06 16:29:54 -0800 | [diff] [blame] | 716 | * The first tail page's ->compound_dtor holds the offset in array of compound |
| 717 | * page destructors. See compound_page_dtors. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 718 | * |
Kirill A. Shutemov | 1d798ca | 2015-11-06 16:29:54 -0800 | [diff] [blame] | 719 | * The first tail page's ->compound_order holds the order of allocation. |
Hugh Dickins | 41d78ba | 2006-02-14 13:52:58 -0800 | [diff] [blame] | 720 | * This usage means that zero-order pages may not be compound. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 721 | */ |
Hugh Dickins | d98c7a0 | 2006-02-14 13:52:59 -0800 | [diff] [blame] | 722 | |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 723 | void free_compound_page(struct page *page) |
Hugh Dickins | d98c7a0 | 2006-02-14 13:52:59 -0800 | [diff] [blame] | 724 | { |
Matthew Wilcox (Oracle) | bbc6b70 | 2021-05-01 20:42:23 -0400 | [diff] [blame] | 725 | mem_cgroup_uncharge(page_folio(page)); |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 726 | free_the_page(page, compound_order(page)); |
Hugh Dickins | d98c7a0 | 2006-02-14 13:52:59 -0800 | [diff] [blame] | 727 | } |
| 728 | |
Kirill A. Shutemov | d00181b | 2015-11-06 16:29:57 -0800 | [diff] [blame] | 729 | void prep_compound_page(struct page *page, unsigned int order) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 730 | { |
| 731 | int i; |
| 732 | int nr_pages = 1 << order; |
| 733 | |
Christoph Lameter | 6d77795 | 2007-05-06 14:49:40 -0700 | [diff] [blame] | 734 | __SetPageHead(page); |
Andy Whitcroft | 18229df | 2008-11-06 12:53:27 -0800 | [diff] [blame] | 735 | for (i = 1; i < nr_pages; i++) { |
| 736 | struct page *p = page + i; |
Kirill A. Shutemov | 1c290f6 | 2016-01-15 16:52:07 -0800 | [diff] [blame] | 737 | p->mapping = TAIL_MAPPING; |
Kirill A. Shutemov | 1d798ca | 2015-11-06 16:29:54 -0800 | [diff] [blame] | 738 | set_compound_head(p, page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 739 | } |
Matthew Wilcox (Oracle) | 1378a5e | 2020-08-14 17:30:23 -0700 | [diff] [blame] | 740 | |
| 741 | set_compound_page_dtor(page, COMPOUND_PAGE_DTOR); |
| 742 | set_compound_order(page, order); |
Kirill A. Shutemov | 53f9263 | 2016-01-15 16:53:42 -0800 | [diff] [blame] | 743 | atomic_set(compound_mapcount_ptr(page), -1); |
John Hubbard | 47e29d3 | 2020-04-01 21:05:33 -0700 | [diff] [blame] | 744 | if (hpage_pincount_available(page)) |
| 745 | atomic_set(compound_pincount_ptr(page), 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 746 | } |
| 747 | |
Stanislaw Gruszka | c0a32fc | 2012-01-10 15:07:28 -0800 | [diff] [blame] | 748 | #ifdef CONFIG_DEBUG_PAGEALLOC |
| 749 | unsigned int _debug_guardpage_minorder; |
Vlastimil Babka | 96a2b03 | 2019-07-11 20:55:06 -0700 | [diff] [blame] | 750 | |
Vlastimil Babka | 8e57f8a | 2020-01-13 16:29:20 -0800 | [diff] [blame] | 751 | bool _debug_pagealloc_enabled_early __read_mostly |
| 752 | = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT); |
| 753 | EXPORT_SYMBOL(_debug_pagealloc_enabled_early); |
Vlastimil Babka | 96a2b03 | 2019-07-11 20:55:06 -0700 | [diff] [blame] | 754 | DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); |
Joonsoo Kim | 505f6d2 | 2016-03-17 14:17:56 -0700 | [diff] [blame] | 755 | EXPORT_SYMBOL(_debug_pagealloc_enabled); |
Vlastimil Babka | 96a2b03 | 2019-07-11 20:55:06 -0700 | [diff] [blame] | 756 | |
| 757 | DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled); |
Joonsoo Kim | e30825f | 2014-12-12 16:55:49 -0800 | [diff] [blame] | 758 | |
Joonsoo Kim | 031bc57 | 2014-12-12 16:55:52 -0800 | [diff] [blame] | 759 | static int __init early_debug_pagealloc(char *buf) |
| 760 | { |
Vlastimil Babka | 8e57f8a | 2020-01-13 16:29:20 -0800 | [diff] [blame] | 761 | return kstrtobool(buf, &_debug_pagealloc_enabled_early); |
Joonsoo Kim | 031bc57 | 2014-12-12 16:55:52 -0800 | [diff] [blame] | 762 | } |
| 763 | early_param("debug_pagealloc", early_debug_pagealloc); |
| 764 | |
Stanislaw Gruszka | c0a32fc | 2012-01-10 15:07:28 -0800 | [diff] [blame] | 765 | static int __init debug_guardpage_minorder_setup(char *buf) |
| 766 | { |
| 767 | unsigned long res; |
| 768 | |
| 769 | if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) { |
Joe Perches | 1170532 | 2016-03-17 14:19:50 -0700 | [diff] [blame] | 770 | pr_err("Bad debug_guardpage_minorder value\n"); |
Stanislaw Gruszka | c0a32fc | 2012-01-10 15:07:28 -0800 | [diff] [blame] | 771 | return 0; |
| 772 | } |
| 773 | _debug_guardpage_minorder = res; |
Joe Perches | 1170532 | 2016-03-17 14:19:50 -0700 | [diff] [blame] | 774 | pr_info("Setting debug_guardpage_minorder to %lu\n", res); |
Stanislaw Gruszka | c0a32fc | 2012-01-10 15:07:28 -0800 | [diff] [blame] | 775 | return 0; |
| 776 | } |
Joonsoo Kim | f1c1e9f | 2016-10-07 16:58:18 -0700 | [diff] [blame] | 777 | early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup); |
Stanislaw Gruszka | c0a32fc | 2012-01-10 15:07:28 -0800 | [diff] [blame] | 778 | |
Joonsoo Kim | acbc15a | 2016-10-07 16:58:15 -0700 | [diff] [blame] | 779 | static inline bool set_page_guard(struct zone *zone, struct page *page, |
Joonsoo Kim | 2847cf9 | 2014-12-12 16:55:01 -0800 | [diff] [blame] | 780 | unsigned int order, int migratetype) |
Stanislaw Gruszka | c0a32fc | 2012-01-10 15:07:28 -0800 | [diff] [blame] | 781 | { |
Joonsoo Kim | e30825f | 2014-12-12 16:55:49 -0800 | [diff] [blame] | 782 | if (!debug_guardpage_enabled()) |
Joonsoo Kim | acbc15a | 2016-10-07 16:58:15 -0700 | [diff] [blame] | 783 | return false; |
| 784 | |
| 785 | if (order >= debug_guardpage_minorder()) |
| 786 | return false; |
Joonsoo Kim | e30825f | 2014-12-12 16:55:49 -0800 | [diff] [blame] | 787 | |
Vlastimil Babka | 3972f6b | 2019-07-11 20:55:13 -0700 | [diff] [blame] | 788 | __SetPageGuard(page); |
Joonsoo Kim | 2847cf9 | 2014-12-12 16:55:01 -0800 | [diff] [blame] | 789 | INIT_LIST_HEAD(&page->lru); |
| 790 | set_page_private(page, order); |
| 791 | /* Guard pages are not available for any usage */ |
| 792 | __mod_zone_freepage_state(zone, -(1 << order), migratetype); |
Joonsoo Kim | acbc15a | 2016-10-07 16:58:15 -0700 | [diff] [blame] | 793 | |
| 794 | return true; |
Stanislaw Gruszka | c0a32fc | 2012-01-10 15:07:28 -0800 | [diff] [blame] | 795 | } |
| 796 | |
Joonsoo Kim | 2847cf9 | 2014-12-12 16:55:01 -0800 | [diff] [blame] | 797 | static inline void clear_page_guard(struct zone *zone, struct page *page, |
| 798 | unsigned int order, int migratetype) |
Stanislaw Gruszka | c0a32fc | 2012-01-10 15:07:28 -0800 | [diff] [blame] | 799 | { |
Joonsoo Kim | e30825f | 2014-12-12 16:55:49 -0800 | [diff] [blame] | 800 | if (!debug_guardpage_enabled()) |
| 801 | return; |
| 802 | |
Vlastimil Babka | 3972f6b | 2019-07-11 20:55:13 -0700 | [diff] [blame] | 803 | __ClearPageGuard(page); |
Joonsoo Kim | e30825f | 2014-12-12 16:55:49 -0800 | [diff] [blame] | 804 | |
Joonsoo Kim | 2847cf9 | 2014-12-12 16:55:01 -0800 | [diff] [blame] | 805 | set_page_private(page, 0); |
| 806 | if (!is_migrate_isolate(migratetype)) |
| 807 | __mod_zone_freepage_state(zone, (1 << order), migratetype); |
Stanislaw Gruszka | c0a32fc | 2012-01-10 15:07:28 -0800 | [diff] [blame] | 808 | } |
| 809 | #else |
Joonsoo Kim | acbc15a | 2016-10-07 16:58:15 -0700 | [diff] [blame] | 810 | static inline bool set_page_guard(struct zone *zone, struct page *page, |
| 811 | unsigned int order, int migratetype) { return false; } |
Joonsoo Kim | 2847cf9 | 2014-12-12 16:55:01 -0800 | [diff] [blame] | 812 | static inline void clear_page_guard(struct zone *zone, struct page *page, |
| 813 | unsigned int order, int migratetype) {} |
Stanislaw Gruszka | c0a32fc | 2012-01-10 15:07:28 -0800 | [diff] [blame] | 814 | #endif |
| 815 | |
Vlastimil Babka | 0401351 | 2020-12-14 19:13:30 -0800 | [diff] [blame] | 816 | /* |
| 817 | * Enable static keys related to various memory debugging and hardening options. |
| 818 | * Some override others, and depend on early params that are evaluated in the |
| 819 | * order of appearance. So we need to first gather the full picture of what was |
| 820 | * enabled, and then make decisions. |
| 821 | */ |
| 822 | void init_mem_debugging_and_hardening(void) |
| 823 | { |
Sergei Trofimovich | 9df65f5 | 2021-04-29 23:02:11 -0700 | [diff] [blame] | 824 | bool page_poisoning_requested = false; |
Vlastimil Babka | 0401351 | 2020-12-14 19:13:30 -0800 | [diff] [blame] | 825 | |
Vlastimil Babka | 8db26a3 | 2020-12-14 19:13:34 -0800 | [diff] [blame] | 826 | #ifdef CONFIG_PAGE_POISONING |
| 827 | /* |
| 828 | * Page poisoning is debug page alloc for some arches. If |
| 829 | * either of those options are enabled, enable poisoning. |
| 830 | */ |
| 831 | if (page_poisoning_enabled() || |
| 832 | (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && |
Sergei Trofimovich | 9df65f5 | 2021-04-29 23:02:11 -0700 | [diff] [blame] | 833 | debug_pagealloc_enabled())) { |
Vlastimil Babka | 8db26a3 | 2020-12-14 19:13:34 -0800 | [diff] [blame] | 834 | static_branch_enable(&_page_poisoning_enabled); |
Sergei Trofimovich | 9df65f5 | 2021-04-29 23:02:11 -0700 | [diff] [blame] | 835 | page_poisoning_requested = true; |
| 836 | } |
Vlastimil Babka | 8db26a3 | 2020-12-14 19:13:34 -0800 | [diff] [blame] | 837 | #endif |
| 838 | |
Sergei Trofimovich | 69e5d32 | 2021-07-23 15:50:23 -0700 | [diff] [blame] | 839 | if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) && |
| 840 | page_poisoning_requested) { |
| 841 | pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " |
| 842 | "will take precedence over init_on_alloc and init_on_free\n"); |
| 843 | _init_on_alloc_enabled_early = false; |
| 844 | _init_on_free_enabled_early = false; |
Sergei Trofimovich | 9df65f5 | 2021-04-29 23:02:11 -0700 | [diff] [blame] | 845 | } |
Sergei Trofimovich | 69e5d32 | 2021-07-23 15:50:23 -0700 | [diff] [blame] | 846 | |
| 847 | if (_init_on_alloc_enabled_early) |
| 848 | static_branch_enable(&init_on_alloc); |
| 849 | else |
| 850 | static_branch_disable(&init_on_alloc); |
| 851 | |
| 852 | if (_init_on_free_enabled_early) |
| 853 | static_branch_enable(&init_on_free); |
| 854 | else |
| 855 | static_branch_disable(&init_on_free); |
Sergei Trofimovich | 9df65f5 | 2021-04-29 23:02:11 -0700 | [diff] [blame] | 856 | |
Vlastimil Babka | 0401351 | 2020-12-14 19:13:30 -0800 | [diff] [blame] | 857 | #ifdef CONFIG_DEBUG_PAGEALLOC |
| 858 | if (!debug_pagealloc_enabled()) |
| 859 | return; |
| 860 | |
| 861 | static_branch_enable(&_debug_pagealloc_enabled); |
| 862 | |
| 863 | if (!debug_guardpage_minorder()) |
| 864 | return; |
| 865 | |
| 866 | static_branch_enable(&_debug_guardpage_enabled); |
| 867 | #endif |
| 868 | } |
| 869 | |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 870 | static inline void set_buddy_order(struct page *page, unsigned int order) |
Andrew Morton | 6aa3001b2 | 2006-04-18 22:20:52 -0700 | [diff] [blame] | 871 | { |
Hugh Dickins | 4c21e2f | 2005-10-29 18:16:40 -0700 | [diff] [blame] | 872 | set_page_private(page, order); |
Nick Piggin | 676165a | 2006-04-10 11:21:48 +1000 | [diff] [blame] | 873 | __SetPageBuddy(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 874 | } |
| 875 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 876 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 877 | * This function checks whether a page is free && is the buddy |
Matthew Wilcox | 6e292b9 | 2018-06-07 17:08:18 -0700 | [diff] [blame] | 878 | * we can coalesce a page and its buddy if |
Vlastimil Babka | 13ad59d | 2017-02-22 15:41:51 -0800 | [diff] [blame] | 879 | * (a) the buddy is not in a hole (check before calling!) && |
Nick Piggin | 676165a | 2006-04-10 11:21:48 +1000 | [diff] [blame] | 880 | * (b) the buddy is in the buddy system && |
Andy Whitcroft | cb2b95e | 2006-06-23 02:03:01 -0700 | [diff] [blame] | 881 | * (c) a page and its buddy have the same order && |
| 882 | * (d) a page and its buddy are in the same zone. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 883 | * |
Matthew Wilcox | 6e292b9 | 2018-06-07 17:08:18 -0700 | [diff] [blame] | 884 | * For recording whether a page is in the buddy system, we set PageBuddy. |
| 885 | * Setting, clearing, and testing PageBuddy is serialized by zone->lock. |
Nick Piggin | 676165a | 2006-04-10 11:21:48 +1000 | [diff] [blame] | 886 | * |
| 887 | * For recording page's order, we use page_private(page). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 888 | */ |
chenqiwu | fe925c0 | 2020-04-01 21:09:56 -0700 | [diff] [blame] | 889 | static inline bool page_is_buddy(struct page *page, struct page *buddy, |
Mel Gorman | 7aeb09f | 2014-06-04 16:10:21 -0700 | [diff] [blame] | 890 | unsigned int order) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 891 | { |
chenqiwu | fe925c0 | 2020-04-01 21:09:56 -0700 | [diff] [blame] | 892 | if (!page_is_guard(buddy) && !PageBuddy(buddy)) |
| 893 | return false; |
Mel Gorman | d34c5fa | 2014-06-04 16:10:10 -0700 | [diff] [blame] | 894 | |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 895 | if (buddy_order(buddy) != order) |
chenqiwu | fe925c0 | 2020-04-01 21:09:56 -0700 | [diff] [blame] | 896 | return false; |
Weijie Yang | 4c5018c | 2015-02-10 14:11:39 -0800 | [diff] [blame] | 897 | |
chenqiwu | fe925c0 | 2020-04-01 21:09:56 -0700 | [diff] [blame] | 898 | /* |
| 899 | * zone check is done late to avoid uselessly calculating |
| 900 | * zone/node ids for pages that could never merge. |
| 901 | */ |
| 902 | if (page_zone_id(page) != page_zone_id(buddy)) |
| 903 | return false; |
Stanislaw Gruszka | c0a32fc | 2012-01-10 15:07:28 -0800 | [diff] [blame] | 904 | |
chenqiwu | fe925c0 | 2020-04-01 21:09:56 -0700 | [diff] [blame] | 905 | VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); |
Mel Gorman | d34c5fa | 2014-06-04 16:10:10 -0700 | [diff] [blame] | 906 | |
chenqiwu | fe925c0 | 2020-04-01 21:09:56 -0700 | [diff] [blame] | 907 | return true; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 908 | } |
| 909 | |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 910 | #ifdef CONFIG_COMPACTION |
| 911 | static inline struct capture_control *task_capc(struct zone *zone) |
| 912 | { |
| 913 | struct capture_control *capc = current->capture_control; |
| 914 | |
Vlastimil Babka | deba048 | 2020-08-06 23:25:16 -0700 | [diff] [blame] | 915 | return unlikely(capc) && |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 916 | !(current->flags & PF_KTHREAD) && |
| 917 | !capc->page && |
Vlastimil Babka | deba048 | 2020-08-06 23:25:16 -0700 | [diff] [blame] | 918 | capc->cc->zone == zone ? capc : NULL; |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 919 | } |
| 920 | |
| 921 | static inline bool |
| 922 | compaction_capture(struct capture_control *capc, struct page *page, |
| 923 | int order, int migratetype) |
| 924 | { |
| 925 | if (!capc || order != capc->cc->order) |
| 926 | return false; |
| 927 | |
| 928 | /* Do not accidentally pollute CMA or isolated regions*/ |
| 929 | if (is_migrate_cma(migratetype) || |
| 930 | is_migrate_isolate(migratetype)) |
| 931 | return false; |
| 932 | |
| 933 | /* |
Ingo Molnar | f0953a1 | 2021-05-06 18:06:47 -0700 | [diff] [blame] | 934 | * Do not let lower order allocations pollute a movable pageblock. |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 935 | * This might let an unmovable request use a reclaimable pageblock |
| 936 | * and vice-versa but no more than normal fallback logic which can |
| 937 | * have trouble finding a high-order free page. |
| 938 | */ |
| 939 | if (order < pageblock_order && migratetype == MIGRATE_MOVABLE) |
| 940 | return false; |
| 941 | |
| 942 | capc->page = page; |
| 943 | return true; |
| 944 | } |
| 945 | |
| 946 | #else |
| 947 | static inline struct capture_control *task_capc(struct zone *zone) |
| 948 | { |
| 949 | return NULL; |
| 950 | } |
| 951 | |
| 952 | static inline bool |
| 953 | compaction_capture(struct capture_control *capc, struct page *page, |
| 954 | int order, int migratetype) |
| 955 | { |
| 956 | return false; |
| 957 | } |
| 958 | #endif /* CONFIG_COMPACTION */ |
| 959 | |
Alexander Duyck | 6ab0136 | 2020-04-06 20:04:49 -0700 | [diff] [blame] | 960 | /* Used for pages not on another list */ |
| 961 | static inline void add_to_free_list(struct page *page, struct zone *zone, |
| 962 | unsigned int order, int migratetype) |
| 963 | { |
| 964 | struct free_area *area = &zone->free_area[order]; |
| 965 | |
| 966 | list_add(&page->lru, &area->free_list[migratetype]); |
| 967 | area->nr_free++; |
| 968 | } |
| 969 | |
| 970 | /* Used for pages not on another list */ |
| 971 | static inline void add_to_free_list_tail(struct page *page, struct zone *zone, |
| 972 | unsigned int order, int migratetype) |
| 973 | { |
| 974 | struct free_area *area = &zone->free_area[order]; |
| 975 | |
| 976 | list_add_tail(&page->lru, &area->free_list[migratetype]); |
| 977 | area->nr_free++; |
| 978 | } |
| 979 | |
David Hildenbrand | 293ffa5 | 2020-10-15 20:09:30 -0700 | [diff] [blame] | 980 | /* |
| 981 | * Used for pages which are on another list. Move the pages to the tail |
| 982 | * of the list - so the moved pages won't immediately be considered for |
| 983 | * allocation again (e.g., optimization for memory onlining). |
| 984 | */ |
Alexander Duyck | 6ab0136 | 2020-04-06 20:04:49 -0700 | [diff] [blame] | 985 | static inline void move_to_free_list(struct page *page, struct zone *zone, |
| 986 | unsigned int order, int migratetype) |
| 987 | { |
| 988 | struct free_area *area = &zone->free_area[order]; |
| 989 | |
David Hildenbrand | 293ffa5 | 2020-10-15 20:09:30 -0700 | [diff] [blame] | 990 | list_move_tail(&page->lru, &area->free_list[migratetype]); |
Alexander Duyck | 6ab0136 | 2020-04-06 20:04:49 -0700 | [diff] [blame] | 991 | } |
| 992 | |
| 993 | static inline void del_page_from_free_list(struct page *page, struct zone *zone, |
| 994 | unsigned int order) |
| 995 | { |
Alexander Duyck | 36e66c5 | 2020-04-06 20:04:56 -0700 | [diff] [blame] | 996 | /* clear reported state and update reported page count */ |
| 997 | if (page_reported(page)) |
| 998 | __ClearPageReported(page); |
| 999 | |
Alexander Duyck | 6ab0136 | 2020-04-06 20:04:49 -0700 | [diff] [blame] | 1000 | list_del(&page->lru); |
| 1001 | __ClearPageBuddy(page); |
| 1002 | set_page_private(page, 0); |
| 1003 | zone->free_area[order].nr_free--; |
| 1004 | } |
| 1005 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1006 | /* |
Alexander Duyck | a2129f2 | 2020-04-06 20:04:45 -0700 | [diff] [blame] | 1007 | * If this is not the largest possible page, check if the buddy |
| 1008 | * of the next-highest order is free. If it is, it's possible |
| 1009 | * that pages are being freed that will coalesce soon. In case, |
| 1010 | * that is happening, add the free page to the tail of the list |
| 1011 | * so it's less likely to be used soon and more likely to be merged |
| 1012 | * as a higher order page |
| 1013 | */ |
| 1014 | static inline bool |
| 1015 | buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, |
| 1016 | struct page *page, unsigned int order) |
| 1017 | { |
| 1018 | struct page *higher_page, *higher_buddy; |
| 1019 | unsigned long combined_pfn; |
| 1020 | |
| 1021 | if (order >= MAX_ORDER - 2) |
| 1022 | return false; |
| 1023 | |
Alexander Duyck | a2129f2 | 2020-04-06 20:04:45 -0700 | [diff] [blame] | 1024 | combined_pfn = buddy_pfn & pfn; |
| 1025 | higher_page = page + (combined_pfn - pfn); |
| 1026 | buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1); |
| 1027 | higher_buddy = higher_page + (buddy_pfn - combined_pfn); |
| 1028 | |
Mike Rapoport | 859a85d | 2021-09-07 19:54:52 -0700 | [diff] [blame] | 1029 | return page_is_buddy(higher_page, higher_buddy, order + 1); |
Alexander Duyck | a2129f2 | 2020-04-06 20:04:45 -0700 | [diff] [blame] | 1030 | } |
| 1031 | |
| 1032 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1033 | * Freeing function for a buddy system allocator. |
| 1034 | * |
| 1035 | * The concept of a buddy system is to maintain direct-mapped table |
| 1036 | * (containing bit values) for memory blocks of various "orders". |
| 1037 | * The bottom level table contains the map for the smallest allocatable |
| 1038 | * units of memory (here, pages), and each level above it describes |
| 1039 | * pairs of units from the levels below, hence, "buddies". |
| 1040 | * At a high level, all that happens here is marking the table entry |
| 1041 | * at the bottom level available, and propagating the changes upward |
| 1042 | * as necessary, plus some accounting needed to play nicely with other |
| 1043 | * parts of the VM system. |
| 1044 | * At each level, we keep a list of pages, which are heads of continuous |
Matthew Wilcox | 6e292b9 | 2018-06-07 17:08:18 -0700 | [diff] [blame] | 1045 | * free pages of length of (1 << order) and marked with PageBuddy. |
| 1046 | * Page's order is recorded in page_private(page) field. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1047 | * So when we are allocating or freeing one, we can derive the state of the |
Michal Nazarewicz | 5f63b72 | 2012-01-11 15:16:11 +0100 | [diff] [blame] | 1048 | * other. That is, if we allocate a small block, and both were |
| 1049 | * free, the remainder of the region must be split into blocks. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1050 | * If a block is freed, and its buddy is also free, then this |
Michal Nazarewicz | 5f63b72 | 2012-01-11 15:16:11 +0100 | [diff] [blame] | 1051 | * triggers coalescing into a block of larger size. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1052 | * |
Nadia Yvette Chambers | 6d49e35 | 2012-12-06 10:39:54 +0100 | [diff] [blame] | 1053 | * -- nyc |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1054 | */ |
| 1055 | |
Nick Piggin | 48db57f | 2006-01-08 01:00:42 -0800 | [diff] [blame] | 1056 | static inline void __free_one_page(struct page *page, |
Mel Gorman | dc4b0ca | 2014-06-04 16:10:17 -0700 | [diff] [blame] | 1057 | unsigned long pfn, |
Mel Gorman | ed0ae21 | 2009-06-16 15:32:07 -0700 | [diff] [blame] | 1058 | struct zone *zone, unsigned int order, |
David Hildenbrand | f04a5d5 | 2020-10-15 20:09:20 -0700 | [diff] [blame] | 1059 | int migratetype, fpi_t fpi_flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1060 | { |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 1061 | struct capture_control *capc = task_capc(zone); |
Kees Cook | 3f649ab | 2020-06-03 13:09:38 -0700 | [diff] [blame] | 1062 | unsigned long buddy_pfn; |
Alexander Duyck | a2129f2 | 2020-04-06 20:04:45 -0700 | [diff] [blame] | 1063 | unsigned long combined_pfn; |
Alexander Duyck | a2129f2 | 2020-04-06 20:04:45 -0700 | [diff] [blame] | 1064 | unsigned int max_order; |
| 1065 | struct page *buddy; |
| 1066 | bool to_tail; |
Vlastimil Babka | d9dddbf | 2016-03-25 14:21:50 -0700 | [diff] [blame] | 1067 | |
Muchun Song | 7ad6983 | 2020-12-14 19:11:25 -0800 | [diff] [blame] | 1068 | max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1069 | |
Cody P Schafer | d29bb97 | 2013-02-22 16:35:25 -0800 | [diff] [blame] | 1070 | VM_BUG_ON(!zone_is_initialized(zone)); |
Kirill A. Shutemov | 6e9f0d5 | 2015-02-11 15:25:50 -0800 | [diff] [blame] | 1071 | VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1072 | |
Mel Gorman | ed0ae21 | 2009-06-16 15:32:07 -0700 | [diff] [blame] | 1073 | VM_BUG_ON(migratetype == -1); |
Vlastimil Babka | d9dddbf | 2016-03-25 14:21:50 -0700 | [diff] [blame] | 1074 | if (likely(!is_migrate_isolate(migratetype))) |
Joonsoo Kim | 8f82b55 | 2014-11-13 15:19:18 -0800 | [diff] [blame] | 1075 | __mod_zone_freepage_state(zone, 1 << order, migratetype); |
Mel Gorman | ed0ae21 | 2009-06-16 15:32:07 -0700 | [diff] [blame] | 1076 | |
Vlastimil Babka | 76741e7 | 2017-02-22 15:41:48 -0800 | [diff] [blame] | 1077 | VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 1078 | VM_BUG_ON_PAGE(bad_range(zone, page), page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1079 | |
Vlastimil Babka | d9dddbf | 2016-03-25 14:21:50 -0700 | [diff] [blame] | 1080 | continue_merging: |
Muchun Song | 7ad6983 | 2020-12-14 19:11:25 -0800 | [diff] [blame] | 1081 | while (order < max_order) { |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 1082 | if (compaction_capture(capc, page, order, migratetype)) { |
| 1083 | __mod_zone_freepage_state(zone, -(1 << order), |
| 1084 | migratetype); |
| 1085 | return; |
| 1086 | } |
Vlastimil Babka | 76741e7 | 2017-02-22 15:41:48 -0800 | [diff] [blame] | 1087 | buddy_pfn = __find_buddy_pfn(pfn, order); |
| 1088 | buddy = page + (buddy_pfn - pfn); |
Vlastimil Babka | 13ad59d | 2017-02-22 15:41:51 -0800 | [diff] [blame] | 1089 | |
Andy Whitcroft | cb2b95e | 2006-06-23 02:03:01 -0700 | [diff] [blame] | 1090 | if (!page_is_buddy(page, buddy, order)) |
Vlastimil Babka | d9dddbf | 2016-03-25 14:21:50 -0700 | [diff] [blame] | 1091 | goto done_merging; |
Stanislaw Gruszka | c0a32fc | 2012-01-10 15:07:28 -0800 | [diff] [blame] | 1092 | /* |
| 1093 | * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, |
| 1094 | * merge with it and move up one order. |
| 1095 | */ |
Dan Williams | b03641a | 2019-05-14 15:41:32 -0700 | [diff] [blame] | 1096 | if (page_is_guard(buddy)) |
Joonsoo Kim | 2847cf9 | 2014-12-12 16:55:01 -0800 | [diff] [blame] | 1097 | clear_page_guard(zone, buddy, order, migratetype); |
Dan Williams | b03641a | 2019-05-14 15:41:32 -0700 | [diff] [blame] | 1098 | else |
Alexander Duyck | 6ab0136 | 2020-04-06 20:04:49 -0700 | [diff] [blame] | 1099 | del_page_from_free_list(buddy, zone, order); |
Vlastimil Babka | 76741e7 | 2017-02-22 15:41:48 -0800 | [diff] [blame] | 1100 | combined_pfn = buddy_pfn & pfn; |
| 1101 | page = page + (combined_pfn - pfn); |
| 1102 | pfn = combined_pfn; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1103 | order++; |
| 1104 | } |
Muchun Song | 7ad6983 | 2020-12-14 19:11:25 -0800 | [diff] [blame] | 1105 | if (order < MAX_ORDER - 1) { |
Vlastimil Babka | d9dddbf | 2016-03-25 14:21:50 -0700 | [diff] [blame] | 1106 | /* If we are here, it means order is >= pageblock_order. |
| 1107 | * We want to prevent merge between freepages on isolate |
| 1108 | * pageblock and normal pageblock. Without this, pageblock |
| 1109 | * isolation could cause incorrect freepage or CMA accounting. |
| 1110 | * |
| 1111 | * We don't want to hit this code for the more frequent |
| 1112 | * low-order merging. |
| 1113 | */ |
| 1114 | if (unlikely(has_isolate_pageblock(zone))) { |
| 1115 | int buddy_mt; |
| 1116 | |
Vlastimil Babka | 76741e7 | 2017-02-22 15:41:48 -0800 | [diff] [blame] | 1117 | buddy_pfn = __find_buddy_pfn(pfn, order); |
| 1118 | buddy = page + (buddy_pfn - pfn); |
Vlastimil Babka | d9dddbf | 2016-03-25 14:21:50 -0700 | [diff] [blame] | 1119 | buddy_mt = get_pageblock_migratetype(buddy); |
| 1120 | |
| 1121 | if (migratetype != buddy_mt |
| 1122 | && (is_migrate_isolate(migratetype) || |
| 1123 | is_migrate_isolate(buddy_mt))) |
| 1124 | goto done_merging; |
| 1125 | } |
Muchun Song | 7ad6983 | 2020-12-14 19:11:25 -0800 | [diff] [blame] | 1126 | max_order = order + 1; |
Vlastimil Babka | d9dddbf | 2016-03-25 14:21:50 -0700 | [diff] [blame] | 1127 | goto continue_merging; |
| 1128 | } |
| 1129 | |
| 1130 | done_merging: |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 1131 | set_buddy_order(page, order); |
Corrado Zoccolo | 6dda9d5 | 2010-05-24 14:31:54 -0700 | [diff] [blame] | 1132 | |
David Hildenbrand | 47b6a24a2 | 2020-10-15 20:09:26 -0700 | [diff] [blame] | 1133 | if (fpi_flags & FPI_TO_TAIL) |
| 1134 | to_tail = true; |
| 1135 | else if (is_shuffle_order(order)) |
Alexander Duyck | a2129f2 | 2020-04-06 20:04:45 -0700 | [diff] [blame] | 1136 | to_tail = shuffle_pick_tail(); |
Dan Williams | 97500a4 | 2019-05-14 15:41:35 -0700 | [diff] [blame] | 1137 | else |
Alexander Duyck | a2129f2 | 2020-04-06 20:04:45 -0700 | [diff] [blame] | 1138 | to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); |
Dan Williams | 97500a4 | 2019-05-14 15:41:35 -0700 | [diff] [blame] | 1139 | |
Alexander Duyck | a2129f2 | 2020-04-06 20:04:45 -0700 | [diff] [blame] | 1140 | if (to_tail) |
Alexander Duyck | 6ab0136 | 2020-04-06 20:04:49 -0700 | [diff] [blame] | 1141 | add_to_free_list_tail(page, zone, order, migratetype); |
Alexander Duyck | a2129f2 | 2020-04-06 20:04:45 -0700 | [diff] [blame] | 1142 | else |
Alexander Duyck | 6ab0136 | 2020-04-06 20:04:49 -0700 | [diff] [blame] | 1143 | add_to_free_list(page, zone, order, migratetype); |
Alexander Duyck | 36e66c5 | 2020-04-06 20:04:56 -0700 | [diff] [blame] | 1144 | |
| 1145 | /* Notify page reporting subsystem of freed page */ |
David Hildenbrand | f04a5d5 | 2020-10-15 20:09:20 -0700 | [diff] [blame] | 1146 | if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY)) |
Alexander Duyck | 36e66c5 | 2020-04-06 20:04:56 -0700 | [diff] [blame] | 1147 | page_reporting_notify_free(order); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1148 | } |
| 1149 | |
Mel Gorman | 7bfec6f | 2016-05-19 17:14:15 -0700 | [diff] [blame] | 1150 | /* |
| 1151 | * A bad page could be due to a number of fields. Instead of multiple branches, |
| 1152 | * try and check multiple fields with one check. The caller must do a detailed |
| 1153 | * check if necessary. |
| 1154 | */ |
| 1155 | static inline bool page_expected_state(struct page *page, |
| 1156 | unsigned long check_flags) |
| 1157 | { |
| 1158 | if (unlikely(atomic_read(&page->_mapcount) != -1)) |
| 1159 | return false; |
| 1160 | |
| 1161 | if (unlikely((unsigned long)page->mapping | |
| 1162 | page_ref_count(page) | |
| 1163 | #ifdef CONFIG_MEMCG |
Muchun Song | 4806083 | 2021-04-29 22:56:45 -0700 | [diff] [blame] | 1164 | page->memcg_data | |
Mel Gorman | 7bfec6f | 2016-05-19 17:14:15 -0700 | [diff] [blame] | 1165 | #endif |
| 1166 | (page->flags & check_flags))) |
| 1167 | return false; |
| 1168 | |
| 1169 | return true; |
| 1170 | } |
| 1171 | |
Wei Yang | 58b7f11 | 2020-06-03 15:58:39 -0700 | [diff] [blame] | 1172 | static const char *page_bad_reason(struct page *page, unsigned long flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1173 | { |
Wei Yang | 82a3241 | 2020-06-03 15:58:29 -0700 | [diff] [blame] | 1174 | const char *bad_reason = NULL; |
Dave Hansen | f0b791a | 2014-01-23 15:52:49 -0800 | [diff] [blame] | 1175 | |
Kirill A. Shutemov | 53f9263 | 2016-01-15 16:53:42 -0800 | [diff] [blame] | 1176 | if (unlikely(atomic_read(&page->_mapcount) != -1)) |
Dave Hansen | f0b791a | 2014-01-23 15:52:49 -0800 | [diff] [blame] | 1177 | bad_reason = "nonzero mapcount"; |
| 1178 | if (unlikely(page->mapping != NULL)) |
| 1179 | bad_reason = "non-NULL mapping"; |
Joonsoo Kim | fe896d1 | 2016-03-17 14:19:26 -0700 | [diff] [blame] | 1180 | if (unlikely(page_ref_count(page) != 0)) |
Joonsoo Kim | 0139aa7 | 2016-05-19 17:10:49 -0700 | [diff] [blame] | 1181 | bad_reason = "nonzero _refcount"; |
Wei Yang | 58b7f11 | 2020-06-03 15:58:39 -0700 | [diff] [blame] | 1182 | if (unlikely(page->flags & flags)) { |
| 1183 | if (flags == PAGE_FLAGS_CHECK_AT_PREP) |
| 1184 | bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set"; |
| 1185 | else |
| 1186 | bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; |
Dave Hansen | f0b791a | 2014-01-23 15:52:49 -0800 | [diff] [blame] | 1187 | } |
Johannes Weiner | 9edad6e | 2014-12-10 15:44:58 -0800 | [diff] [blame] | 1188 | #ifdef CONFIG_MEMCG |
Muchun Song | 4806083 | 2021-04-29 22:56:45 -0700 | [diff] [blame] | 1189 | if (unlikely(page->memcg_data)) |
Johannes Weiner | 9edad6e | 2014-12-10 15:44:58 -0800 | [diff] [blame] | 1190 | bad_reason = "page still charged to cgroup"; |
| 1191 | #endif |
Wei Yang | 58b7f11 | 2020-06-03 15:58:39 -0700 | [diff] [blame] | 1192 | return bad_reason; |
Mel Gorman | bb552ac | 2016-05-19 17:14:18 -0700 | [diff] [blame] | 1193 | } |
| 1194 | |
Wei Yang | 58b7f11 | 2020-06-03 15:58:39 -0700 | [diff] [blame] | 1195 | static void check_free_page_bad(struct page *page) |
| 1196 | { |
| 1197 | bad_page(page, |
| 1198 | page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); |
Mel Gorman | bb552ac | 2016-05-19 17:14:18 -0700 | [diff] [blame] | 1199 | } |
| 1200 | |
Wei Yang | 534fe5e | 2020-06-03 15:58:36 -0700 | [diff] [blame] | 1201 | static inline int check_free_page(struct page *page) |
Mel Gorman | bb552ac | 2016-05-19 17:14:18 -0700 | [diff] [blame] | 1202 | { |
Mel Gorman | da838d4 | 2016-05-19 17:14:21 -0700 | [diff] [blame] | 1203 | if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) |
Mel Gorman | bb552ac | 2016-05-19 17:14:18 -0700 | [diff] [blame] | 1204 | return 0; |
Mel Gorman | bb552ac | 2016-05-19 17:14:18 -0700 | [diff] [blame] | 1205 | |
| 1206 | /* Something has gone sideways, find it */ |
Wei Yang | 0d0c48a | 2020-06-03 15:58:33 -0700 | [diff] [blame] | 1207 | check_free_page_bad(page); |
Mel Gorman | 7bfec6f | 2016-05-19 17:14:15 -0700 | [diff] [blame] | 1208 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1209 | } |
| 1210 | |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1211 | static int free_tail_pages_check(struct page *head_page, struct page *page) |
| 1212 | { |
| 1213 | int ret = 1; |
| 1214 | |
| 1215 | /* |
| 1216 | * We rely page->lru.next never has bit 0 set, unless the page |
| 1217 | * is PageTail(). Let's make sure that's true even for poisoned ->lru. |
| 1218 | */ |
| 1219 | BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); |
| 1220 | |
| 1221 | if (!IS_ENABLED(CONFIG_DEBUG_VM)) { |
| 1222 | ret = 0; |
| 1223 | goto out; |
| 1224 | } |
| 1225 | switch (page - head_page) { |
| 1226 | case 1: |
Matthew Wilcox | 4da1984 | 2018-06-07 17:08:50 -0700 | [diff] [blame] | 1227 | /* the first tail page: ->mapping may be compound_mapcount() */ |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1228 | if (unlikely(compound_mapcount(page))) { |
Wei Yang | 82a3241 | 2020-06-03 15:58:29 -0700 | [diff] [blame] | 1229 | bad_page(page, "nonzero compound_mapcount"); |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1230 | goto out; |
| 1231 | } |
| 1232 | break; |
| 1233 | case 2: |
| 1234 | /* |
| 1235 | * the second tail page: ->mapping is |
Matthew Wilcox | fa3015b | 2018-06-07 17:08:42 -0700 | [diff] [blame] | 1236 | * deferred_list.next -- ignore value. |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1237 | */ |
| 1238 | break; |
| 1239 | default: |
| 1240 | if (page->mapping != TAIL_MAPPING) { |
Wei Yang | 82a3241 | 2020-06-03 15:58:29 -0700 | [diff] [blame] | 1241 | bad_page(page, "corrupted mapping in tail page"); |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1242 | goto out; |
| 1243 | } |
| 1244 | break; |
| 1245 | } |
| 1246 | if (unlikely(!PageTail(page))) { |
Wei Yang | 82a3241 | 2020-06-03 15:58:29 -0700 | [diff] [blame] | 1247 | bad_page(page, "PageTail not set"); |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1248 | goto out; |
| 1249 | } |
| 1250 | if (unlikely(compound_head(page) != head_page)) { |
Wei Yang | 82a3241 | 2020-06-03 15:58:29 -0700 | [diff] [blame] | 1251 | bad_page(page, "compound_head not consistent"); |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1252 | goto out; |
| 1253 | } |
| 1254 | ret = 0; |
| 1255 | out: |
| 1256 | page->mapping = NULL; |
| 1257 | clear_compound_head(page); |
| 1258 | return ret; |
| 1259 | } |
| 1260 | |
Peter Collingbourne | 013bb59 | 2021-06-02 16:52:29 -0700 | [diff] [blame] | 1261 | static void kernel_init_free_pages(struct page *page, int numpages, bool zero_tags) |
Alexander Potapenko | 6471384 | 2019-07-11 20:59:19 -0700 | [diff] [blame] | 1262 | { |
| 1263 | int i; |
| 1264 | |
Peter Collingbourne | 013bb59 | 2021-06-02 16:52:29 -0700 | [diff] [blame] | 1265 | if (zero_tags) { |
| 1266 | for (i = 0; i < numpages; i++) |
| 1267 | tag_clear_highpage(page + i); |
| 1268 | return; |
| 1269 | } |
| 1270 | |
Qian Cai | 9e15afa | 2020-08-06 23:25:54 -0700 | [diff] [blame] | 1271 | /* s390's use of memset() could override KASAN redzones. */ |
| 1272 | kasan_disable_current(); |
Andrey Konovalov | aa1ef4d | 2020-12-22 12:02:17 -0800 | [diff] [blame] | 1273 | for (i = 0; i < numpages; i++) { |
Andrey Konovalov | acb35b1 | 2021-01-23 21:01:43 -0800 | [diff] [blame] | 1274 | u8 tag = page_kasan_tag(page + i); |
Andrey Konovalov | aa1ef4d | 2020-12-22 12:02:17 -0800 | [diff] [blame] | 1275 | page_kasan_tag_reset(page + i); |
Alexander Potapenko | 6471384 | 2019-07-11 20:59:19 -0700 | [diff] [blame] | 1276 | clear_highpage(page + i); |
Andrey Konovalov | acb35b1 | 2021-01-23 21:01:43 -0800 | [diff] [blame] | 1277 | page_kasan_tag_set(page + i, tag); |
Andrey Konovalov | aa1ef4d | 2020-12-22 12:02:17 -0800 | [diff] [blame] | 1278 | } |
Qian Cai | 9e15afa | 2020-08-06 23:25:54 -0700 | [diff] [blame] | 1279 | kasan_enable_current(); |
Alexander Potapenko | 6471384 | 2019-07-11 20:59:19 -0700 | [diff] [blame] | 1280 | } |
| 1281 | |
Mel Gorman | e2769db | 2016-05-19 17:14:38 -0700 | [diff] [blame] | 1282 | static __always_inline bool free_pages_prepare(struct page *page, |
Andrey Konovalov | 2c33568 | 2021-04-29 22:59:52 -0700 | [diff] [blame] | 1283 | unsigned int order, bool check_free, fpi_t fpi_flags) |
Mel Gorman | e2769db | 2016-05-19 17:14:38 -0700 | [diff] [blame] | 1284 | { |
| 1285 | int bad = 0; |
Peter Collingbourne | c275c5c | 2021-06-02 16:52:30 -0700 | [diff] [blame] | 1286 | bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags); |
Mel Gorman | e2769db | 2016-05-19 17:14:38 -0700 | [diff] [blame] | 1287 | |
| 1288 | VM_BUG_ON_PAGE(PageTail(page), page); |
| 1289 | |
| 1290 | trace_mm_page_free(page, order); |
Mel Gorman | e2769db | 2016-05-19 17:14:38 -0700 | [diff] [blame] | 1291 | |
Oscar Salvador | 79f5f8f | 2020-10-15 20:07:09 -0700 | [diff] [blame] | 1292 | if (unlikely(PageHWPoison(page)) && !order) { |
| 1293 | /* |
| 1294 | * Do not let hwpoison pages hit pcplists/buddy |
| 1295 | * Untie memcg state and reset page's owner |
| 1296 | */ |
Roman Gushchin | 18b2db3 | 2020-12-01 13:58:30 -0800 | [diff] [blame] | 1297 | if (memcg_kmem_enabled() && PageMemcgKmem(page)) |
Oscar Salvador | 79f5f8f | 2020-10-15 20:07:09 -0700 | [diff] [blame] | 1298 | __memcg_kmem_uncharge_page(page, order); |
| 1299 | reset_page_owner(page, order); |
| 1300 | return false; |
| 1301 | } |
| 1302 | |
Mel Gorman | e2769db | 2016-05-19 17:14:38 -0700 | [diff] [blame] | 1303 | /* |
| 1304 | * Check tail pages before head page information is cleared to |
| 1305 | * avoid checking PageCompound for order-0 pages. |
| 1306 | */ |
| 1307 | if (unlikely(order)) { |
| 1308 | bool compound = PageCompound(page); |
| 1309 | int i; |
| 1310 | |
| 1311 | VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); |
| 1312 | |
Yang Shi | eac96c3 | 2021-10-28 14:36:11 -0700 | [diff] [blame] | 1313 | if (compound) { |
Kirill A. Shutemov | 9a73f61 | 2016-07-26 15:25:53 -0700 | [diff] [blame] | 1314 | ClearPageDoubleMap(page); |
Yang Shi | eac96c3 | 2021-10-28 14:36:11 -0700 | [diff] [blame] | 1315 | ClearPageHasHWPoisoned(page); |
| 1316 | } |
Mel Gorman | e2769db | 2016-05-19 17:14:38 -0700 | [diff] [blame] | 1317 | for (i = 1; i < (1 << order); i++) { |
| 1318 | if (compound) |
| 1319 | bad += free_tail_pages_check(page, page + i); |
Wei Yang | 534fe5e | 2020-06-03 15:58:36 -0700 | [diff] [blame] | 1320 | if (unlikely(check_free_page(page + i))) { |
Mel Gorman | e2769db | 2016-05-19 17:14:38 -0700 | [diff] [blame] | 1321 | bad++; |
| 1322 | continue; |
| 1323 | } |
| 1324 | (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; |
| 1325 | } |
| 1326 | } |
Minchan Kim | bda807d | 2016-07-26 15:23:05 -0700 | [diff] [blame] | 1327 | if (PageMappingFlags(page)) |
Mel Gorman | e2769db | 2016-05-19 17:14:38 -0700 | [diff] [blame] | 1328 | page->mapping = NULL; |
Roman Gushchin | 18b2db3 | 2020-12-01 13:58:30 -0800 | [diff] [blame] | 1329 | if (memcg_kmem_enabled() && PageMemcgKmem(page)) |
Roman Gushchin | f4b00ea | 2020-04-01 21:06:46 -0700 | [diff] [blame] | 1330 | __memcg_kmem_uncharge_page(page, order); |
Mel Gorman | e2769db | 2016-05-19 17:14:38 -0700 | [diff] [blame] | 1331 | if (check_free) |
Wei Yang | 534fe5e | 2020-06-03 15:58:36 -0700 | [diff] [blame] | 1332 | bad += check_free_page(page); |
Mel Gorman | e2769db | 2016-05-19 17:14:38 -0700 | [diff] [blame] | 1333 | if (bad) |
| 1334 | return false; |
| 1335 | |
| 1336 | page_cpupid_reset_last(page); |
| 1337 | page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; |
| 1338 | reset_page_owner(page, order); |
| 1339 | |
| 1340 | if (!PageHighMem(page)) { |
| 1341 | debug_check_no_locks_freed(page_address(page), |
| 1342 | PAGE_SIZE << order); |
| 1343 | debug_check_no_obj_freed(page_address(page), |
| 1344 | PAGE_SIZE << order); |
| 1345 | } |
Alexander Potapenko | 6471384 | 2019-07-11 20:59:19 -0700 | [diff] [blame] | 1346 | |
Vlastimil Babka | 8db26a3 | 2020-12-14 19:13:34 -0800 | [diff] [blame] | 1347 | kernel_poison_pages(page, 1 << order); |
| 1348 | |
Qian Cai | 234fdce | 2019-10-06 17:58:25 -0700 | [diff] [blame] | 1349 | /* |
Andrey Konovalov | 1bb5eab | 2021-04-29 23:00:02 -0700 | [diff] [blame] | 1350 | * As memory initialization might be integrated into KASAN, |
| 1351 | * kasan_free_pages and kernel_init_free_pages must be |
| 1352 | * kept together to avoid discrepancies in behavior. |
| 1353 | * |
Andrey Konovalov | f9d79e8 | 2021-03-12 21:08:10 -0800 | [diff] [blame] | 1354 | * With hardware tag-based KASAN, memory tags must be set before the |
| 1355 | * page becomes unavailable via debug_pagealloc or arch_free_page. |
| 1356 | */ |
Peter Collingbourne | 7a3b835 | 2021-06-02 16:52:28 -0700 | [diff] [blame] | 1357 | if (kasan_has_integrated_init()) { |
| 1358 | if (!skip_kasan_poison) |
| 1359 | kasan_free_pages(page, order); |
| 1360 | } else { |
| 1361 | bool init = want_init_on_free(); |
| 1362 | |
| 1363 | if (init) |
Peter Collingbourne | 013bb59 | 2021-06-02 16:52:29 -0700 | [diff] [blame] | 1364 | kernel_init_free_pages(page, 1 << order, false); |
Peter Collingbourne | 7a3b835 | 2021-06-02 16:52:28 -0700 | [diff] [blame] | 1365 | if (!skip_kasan_poison) |
| 1366 | kasan_poison_pages(page, order, init); |
| 1367 | } |
Andrey Konovalov | f9d79e8 | 2021-03-12 21:08:10 -0800 | [diff] [blame] | 1368 | |
| 1369 | /* |
Qian Cai | 234fdce | 2019-10-06 17:58:25 -0700 | [diff] [blame] | 1370 | * arch_free_page() can make the page's contents inaccessible. s390 |
| 1371 | * does this. So nothing which can access the page's contents should |
| 1372 | * happen after this. |
| 1373 | */ |
| 1374 | arch_free_page(page, order); |
| 1375 | |
Mike Rapoport | 77bc7fd | 2020-12-14 19:10:20 -0800 | [diff] [blame] | 1376 | debug_pagealloc_unmap_pages(page, 1 << order); |
Rick Edgecombe | d633269 | 2019-04-25 17:11:35 -0700 | [diff] [blame] | 1377 | |
Mel Gorman | e2769db | 2016-05-19 17:14:38 -0700 | [diff] [blame] | 1378 | return true; |
| 1379 | } |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1380 | |
| 1381 | #ifdef CONFIG_DEBUG_VM |
Vlastimil Babka | 4462b32 | 2019-07-11 20:55:09 -0700 | [diff] [blame] | 1382 | /* |
| 1383 | * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed |
| 1384 | * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when |
| 1385 | * moved from pcp lists to free lists. |
| 1386 | */ |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1387 | static bool free_pcp_prepare(struct page *page, unsigned int order) |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1388 | { |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1389 | return free_pages_prepare(page, order, true, FPI_NONE); |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1390 | } |
| 1391 | |
Vlastimil Babka | 4462b32 | 2019-07-11 20:55:09 -0700 | [diff] [blame] | 1392 | static bool bulkfree_pcp_prepare(struct page *page) |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1393 | { |
Vlastimil Babka | 8e57f8a | 2020-01-13 16:29:20 -0800 | [diff] [blame] | 1394 | if (debug_pagealloc_enabled_static()) |
Wei Yang | 534fe5e | 2020-06-03 15:58:36 -0700 | [diff] [blame] | 1395 | return check_free_page(page); |
Vlastimil Babka | 4462b32 | 2019-07-11 20:55:09 -0700 | [diff] [blame] | 1396 | else |
| 1397 | return false; |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1398 | } |
| 1399 | #else |
Vlastimil Babka | 4462b32 | 2019-07-11 20:55:09 -0700 | [diff] [blame] | 1400 | /* |
| 1401 | * With DEBUG_VM disabled, order-0 pages being freed are checked only when |
| 1402 | * moving from pcp lists to free list in order to reduce overhead. With |
| 1403 | * debug_pagealloc enabled, they are checked also immediately when being freed |
| 1404 | * to the pcp lists. |
| 1405 | */ |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1406 | static bool free_pcp_prepare(struct page *page, unsigned int order) |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1407 | { |
Vlastimil Babka | 8e57f8a | 2020-01-13 16:29:20 -0800 | [diff] [blame] | 1408 | if (debug_pagealloc_enabled_static()) |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1409 | return free_pages_prepare(page, order, true, FPI_NONE); |
Vlastimil Babka | 4462b32 | 2019-07-11 20:55:09 -0700 | [diff] [blame] | 1410 | else |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1411 | return free_pages_prepare(page, order, false, FPI_NONE); |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1412 | } |
| 1413 | |
| 1414 | static bool bulkfree_pcp_prepare(struct page *page) |
| 1415 | { |
Wei Yang | 534fe5e | 2020-06-03 15:58:36 -0700 | [diff] [blame] | 1416 | return check_free_page(page); |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1417 | } |
| 1418 | #endif /* CONFIG_DEBUG_VM */ |
| 1419 | |
Aaron Lu | 9733416 | 2018-04-05 16:24:14 -0700 | [diff] [blame] | 1420 | static inline void prefetch_buddy(struct page *page) |
| 1421 | { |
| 1422 | unsigned long pfn = page_to_pfn(page); |
| 1423 | unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0); |
| 1424 | struct page *buddy = page + (buddy_pfn - pfn); |
| 1425 | |
| 1426 | prefetch(buddy); |
| 1427 | } |
| 1428 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1429 | /* |
Mel Gorman | 5f8dcc2 | 2009-09-21 17:03:19 -0700 | [diff] [blame] | 1430 | * Frees a number of pages from the PCP lists |
Miaohe Lin | 7cba630 | 2021-11-05 13:40:08 -0700 | [diff] [blame] | 1431 | * Assumes all pages on list are in same zone. |
Renaud Lienhart | 207f36e | 2005-09-10 00:26:59 -0700 | [diff] [blame] | 1432 | * count is the number of pages to free. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1433 | */ |
Mel Gorman | 5f8dcc2 | 2009-09-21 17:03:19 -0700 | [diff] [blame] | 1434 | static void free_pcppages_bulk(struct zone *zone, int count, |
| 1435 | struct per_cpu_pages *pcp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1436 | { |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1437 | int pindex = 0; |
Mel Gorman | a6f9edd6 | 2009-09-21 17:03:20 -0700 | [diff] [blame] | 1438 | int batch_free = 0; |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1439 | int nr_freed = 0; |
| 1440 | unsigned int order; |
Vlastimil Babka | 5c3ad2e | 2020-12-14 19:10:50 -0800 | [diff] [blame] | 1441 | int prefetch_nr = READ_ONCE(pcp->batch); |
Mel Gorman | 3777999 | 2016-05-19 17:13:58 -0700 | [diff] [blame] | 1442 | bool isolated_pageblocks; |
Aaron Lu | 0a5f4e5 | 2018-04-05 16:24:10 -0700 | [diff] [blame] | 1443 | struct page *page, *tmp; |
| 1444 | LIST_HEAD(head); |
Mel Gorman | f2260e6 | 2009-06-16 15:32:13 -0700 | [diff] [blame] | 1445 | |
Charan Teja Reddy | 88e8ac1 | 2020-08-20 17:42:27 -0700 | [diff] [blame] | 1446 | /* |
| 1447 | * Ensure proper count is passed which otherwise would stuck in the |
| 1448 | * below while (list_empty(list)) loop. |
| 1449 | */ |
| 1450 | count = min(pcp->count, count); |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1451 | while (count > 0) { |
Mel Gorman | 5f8dcc2 | 2009-09-21 17:03:19 -0700 | [diff] [blame] | 1452 | struct list_head *list; |
Nick Piggin | 48db57f | 2006-01-08 01:00:42 -0800 | [diff] [blame] | 1453 | |
Mel Gorman | 5f8dcc2 | 2009-09-21 17:03:19 -0700 | [diff] [blame] | 1454 | /* |
Mel Gorman | a6f9edd6 | 2009-09-21 17:03:20 -0700 | [diff] [blame] | 1455 | * Remove pages from lists in a round-robin fashion. A |
| 1456 | * batch_free count is maintained that is incremented when an |
| 1457 | * empty list is encountered. This is so more pages are freed |
| 1458 | * off fuller lists instead of spinning excessively around empty |
| 1459 | * lists |
Mel Gorman | 5f8dcc2 | 2009-09-21 17:03:19 -0700 | [diff] [blame] | 1460 | */ |
| 1461 | do { |
Mel Gorman | a6f9edd6 | 2009-09-21 17:03:20 -0700 | [diff] [blame] | 1462 | batch_free++; |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1463 | if (++pindex == NR_PCP_LISTS) |
| 1464 | pindex = 0; |
| 1465 | list = &pcp->lists[pindex]; |
Mel Gorman | 5f8dcc2 | 2009-09-21 17:03:19 -0700 | [diff] [blame] | 1466 | } while (list_empty(list)); |
| 1467 | |
Namhyung Kim | 1d16871 | 2011-03-22 16:32:45 -0700 | [diff] [blame] | 1468 | /* This is the only non-empty list. Free them all. */ |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1469 | if (batch_free == NR_PCP_LISTS) |
Mel Gorman | e5b31ac | 2016-05-19 17:14:24 -0700 | [diff] [blame] | 1470 | batch_free = count; |
Namhyung Kim | 1d16871 | 2011-03-22 16:32:45 -0700 | [diff] [blame] | 1471 | |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1472 | order = pindex_to_order(pindex); |
| 1473 | BUILD_BUG_ON(MAX_ORDER >= (1<<NR_PCP_ORDER_WIDTH)); |
Mel Gorman | a6f9edd6 | 2009-09-21 17:03:20 -0700 | [diff] [blame] | 1474 | do { |
Geliang Tang | a16601c | 2016-01-14 15:20:30 -0800 | [diff] [blame] | 1475 | page = list_last_entry(list, struct page, lru); |
Aaron Lu | 0a5f4e5 | 2018-04-05 16:24:10 -0700 | [diff] [blame] | 1476 | /* must delete to avoid corrupting pcp list */ |
Mel Gorman | a6f9edd6 | 2009-09-21 17:03:20 -0700 | [diff] [blame] | 1477 | list_del(&page->lru); |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1478 | nr_freed += 1 << order; |
| 1479 | count -= 1 << order; |
Vlastimil Babka | aa016d1 | 2015-09-08 15:01:22 -0700 | [diff] [blame] | 1480 | |
Mel Gorman | 4db7548 | 2016-05-19 17:14:32 -0700 | [diff] [blame] | 1481 | if (bulkfree_pcp_prepare(page)) |
| 1482 | continue; |
| 1483 | |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1484 | /* Encode order with the migratetype */ |
| 1485 | page->index <<= NR_PCP_ORDER_WIDTH; |
| 1486 | page->index |= order; |
| 1487 | |
Aaron Lu | 0a5f4e5 | 2018-04-05 16:24:10 -0700 | [diff] [blame] | 1488 | list_add_tail(&page->lru, &head); |
Aaron Lu | 9733416 | 2018-04-05 16:24:14 -0700 | [diff] [blame] | 1489 | |
| 1490 | /* |
| 1491 | * We are going to put the page back to the global |
| 1492 | * pool, prefetch its buddy to speed up later access |
| 1493 | * under zone->lock. It is believed the overhead of |
| 1494 | * an additional test and calculating buddy_pfn here |
| 1495 | * can be offset by reduced memory latency later. To |
| 1496 | * avoid excessive prefetching due to large count, only |
| 1497 | * prefetch buddy for the first pcp->batch nr of pages. |
| 1498 | */ |
Vlastimil Babka | 5c3ad2e | 2020-12-14 19:10:50 -0800 | [diff] [blame] | 1499 | if (prefetch_nr) { |
Aaron Lu | 9733416 | 2018-04-05 16:24:14 -0700 | [diff] [blame] | 1500 | prefetch_buddy(page); |
Vlastimil Babka | 5c3ad2e | 2020-12-14 19:10:50 -0800 | [diff] [blame] | 1501 | prefetch_nr--; |
| 1502 | } |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1503 | } while (count > 0 && --batch_free && !list_empty(list)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1504 | } |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1505 | pcp->count -= nr_freed; |
Aaron Lu | 0a5f4e5 | 2018-04-05 16:24:10 -0700 | [diff] [blame] | 1506 | |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 1507 | /* |
| 1508 | * local_lock_irq held so equivalent to spin_lock_irqsave for |
| 1509 | * both PREEMPT_RT and non-PREEMPT_RT configurations. |
| 1510 | */ |
Aaron Lu | 0a5f4e5 | 2018-04-05 16:24:10 -0700 | [diff] [blame] | 1511 | spin_lock(&zone->lock); |
| 1512 | isolated_pageblocks = has_isolate_pageblock(zone); |
| 1513 | |
| 1514 | /* |
| 1515 | * Use safe version since after __free_one_page(), |
| 1516 | * page->lru.next will not point to original list. |
| 1517 | */ |
| 1518 | list_for_each_entry_safe(page, tmp, &head, lru) { |
| 1519 | int mt = get_pcppage_migratetype(page); |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1520 | |
| 1521 | /* mt has been encoded with the order (see above) */ |
| 1522 | order = mt & NR_PCP_ORDER_MASK; |
| 1523 | mt >>= NR_PCP_ORDER_WIDTH; |
| 1524 | |
Aaron Lu | 0a5f4e5 | 2018-04-05 16:24:10 -0700 | [diff] [blame] | 1525 | /* MIGRATE_ISOLATE page should not go to pcplists */ |
| 1526 | VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); |
| 1527 | /* Pageblock could have been isolated meanwhile */ |
| 1528 | if (unlikely(isolated_pageblocks)) |
| 1529 | mt = get_pageblock_migratetype(page); |
| 1530 | |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 1531 | __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE); |
| 1532 | trace_mm_page_pcpu_drain(page, order, mt); |
Aaron Lu | 0a5f4e5 | 2018-04-05 16:24:10 -0700 | [diff] [blame] | 1533 | } |
Mel Gorman | d34b073 | 2017-04-20 14:37:43 -0700 | [diff] [blame] | 1534 | spin_unlock(&zone->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1535 | } |
| 1536 | |
Mel Gorman | dc4b0ca | 2014-06-04 16:10:17 -0700 | [diff] [blame] | 1537 | static void free_one_page(struct zone *zone, |
| 1538 | struct page *page, unsigned long pfn, |
Mel Gorman | 7aeb09f | 2014-06-04 16:10:21 -0700 | [diff] [blame] | 1539 | unsigned int order, |
David Hildenbrand | 7fef431 | 2020-10-15 20:09:35 -0700 | [diff] [blame] | 1540 | int migratetype, fpi_t fpi_flags) |
Nick Piggin | 48db57f | 2006-01-08 01:00:42 -0800 | [diff] [blame] | 1541 | { |
Mel Gorman | df1acc8 | 2021-06-28 19:42:00 -0700 | [diff] [blame] | 1542 | unsigned long flags; |
| 1543 | |
| 1544 | spin_lock_irqsave(&zone->lock, flags); |
Joonsoo Kim | ad53f92 | 2014-11-13 15:19:11 -0800 | [diff] [blame] | 1545 | if (unlikely(has_isolate_pageblock(zone) || |
| 1546 | is_migrate_isolate(migratetype))) { |
| 1547 | migratetype = get_pfnblock_migratetype(page, pfn); |
Joonsoo Kim | ad53f92 | 2014-11-13 15:19:11 -0800 | [diff] [blame] | 1548 | } |
David Hildenbrand | 7fef431 | 2020-10-15 20:09:35 -0700 | [diff] [blame] | 1549 | __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); |
Mel Gorman | df1acc8 | 2021-06-28 19:42:00 -0700 | [diff] [blame] | 1550 | spin_unlock_irqrestore(&zone->lock, flags); |
Nick Piggin | 48db57f | 2006-01-08 01:00:42 -0800 | [diff] [blame] | 1551 | } |
| 1552 | |
Robin Holt | 1e8ce83 | 2015-06-30 14:56:45 -0700 | [diff] [blame] | 1553 | static void __meminit __init_single_page(struct page *page, unsigned long pfn, |
Pavel Tatashin | d0dc12e | 2018-04-05 16:23:00 -0700 | [diff] [blame] | 1554 | unsigned long zone, int nid) |
Robin Holt | 1e8ce83 | 2015-06-30 14:56:45 -0700 | [diff] [blame] | 1555 | { |
Pavel Tatashin | d0dc12e | 2018-04-05 16:23:00 -0700 | [diff] [blame] | 1556 | mm_zero_struct_page(page); |
Robin Holt | 1e8ce83 | 2015-06-30 14:56:45 -0700 | [diff] [blame] | 1557 | set_page_links(page, zone, nid, pfn); |
Robin Holt | 1e8ce83 | 2015-06-30 14:56:45 -0700 | [diff] [blame] | 1558 | init_page_count(page); |
| 1559 | page_mapcount_reset(page); |
| 1560 | page_cpupid_reset_last(page); |
Andrey Konovalov | 2813b9c | 2018-12-28 00:30:57 -0800 | [diff] [blame] | 1561 | page_kasan_tag_reset(page); |
Robin Holt | 1e8ce83 | 2015-06-30 14:56:45 -0700 | [diff] [blame] | 1562 | |
Robin Holt | 1e8ce83 | 2015-06-30 14:56:45 -0700 | [diff] [blame] | 1563 | INIT_LIST_HEAD(&page->lru); |
| 1564 | #ifdef WANT_PAGE_VIRTUAL |
| 1565 | /* The shift won't overflow because ZONE_NORMAL is below 4G. */ |
| 1566 | if (!is_highmem_idx(zone)) |
| 1567 | set_page_address(page, __va(pfn << PAGE_SHIFT)); |
| 1568 | #endif |
| 1569 | } |
| 1570 | |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 1571 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
Arnd Bergmann | 57148a6 | 2017-10-03 16:15:10 -0700 | [diff] [blame] | 1572 | static void __meminit init_reserved_page(unsigned long pfn) |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 1573 | { |
| 1574 | pg_data_t *pgdat; |
| 1575 | int nid, zid; |
| 1576 | |
| 1577 | if (!early_page_uninitialised(pfn)) |
| 1578 | return; |
| 1579 | |
| 1580 | nid = early_pfn_to_nid(pfn); |
| 1581 | pgdat = NODE_DATA(nid); |
| 1582 | |
| 1583 | for (zid = 0; zid < MAX_NR_ZONES; zid++) { |
| 1584 | struct zone *zone = &pgdat->node_zones[zid]; |
| 1585 | |
Miaohe Lin | 86fb05b | 2021-11-05 13:40:11 -0700 | [diff] [blame] | 1586 | if (zone_spans_pfn(zone, pfn)) |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 1587 | break; |
| 1588 | } |
Pavel Tatashin | d0dc12e | 2018-04-05 16:23:00 -0700 | [diff] [blame] | 1589 | __init_single_page(pfn_to_page(pfn), pfn, zid, nid); |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 1590 | } |
| 1591 | #else |
| 1592 | static inline void init_reserved_page(unsigned long pfn) |
| 1593 | { |
| 1594 | } |
| 1595 | #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ |
| 1596 | |
Nathan Zimmer | 92923ca | 2015-06-30 14:56:48 -0700 | [diff] [blame] | 1597 | /* |
| 1598 | * Initialised pages do not have PageReserved set. This function is |
| 1599 | * called for each range allocated by the bootmem allocator and |
| 1600 | * marks the pages PageReserved. The remaining valid pages are later |
| 1601 | * sent to the buddy page allocator. |
| 1602 | */ |
Stefan Bader | 4b50bcc | 2016-05-20 16:58:38 -0700 | [diff] [blame] | 1603 | void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) |
Nathan Zimmer | 92923ca | 2015-06-30 14:56:48 -0700 | [diff] [blame] | 1604 | { |
| 1605 | unsigned long start_pfn = PFN_DOWN(start); |
| 1606 | unsigned long end_pfn = PFN_UP(end); |
| 1607 | |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 1608 | for (; start_pfn < end_pfn; start_pfn++) { |
| 1609 | if (pfn_valid(start_pfn)) { |
| 1610 | struct page *page = pfn_to_page(start_pfn); |
| 1611 | |
| 1612 | init_reserved_page(start_pfn); |
Kirill A. Shutemov | 1d798ca | 2015-11-06 16:29:54 -0800 | [diff] [blame] | 1613 | |
| 1614 | /* Avoid false-positive PageTail() */ |
| 1615 | INIT_LIST_HEAD(&page->lru); |
| 1616 | |
Alexander Duyck | d483da5 | 2018-10-26 15:07:48 -0700 | [diff] [blame] | 1617 | /* |
| 1618 | * no need for atomic set_bit because the struct |
| 1619 | * page is not visible yet so nobody should |
| 1620 | * access it yet. |
| 1621 | */ |
| 1622 | __SetPageReserved(page); |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 1623 | } |
| 1624 | } |
Nathan Zimmer | 92923ca | 2015-06-30 14:56:48 -0700 | [diff] [blame] | 1625 | } |
| 1626 | |
David Hildenbrand | 7fef431 | 2020-10-15 20:09:35 -0700 | [diff] [blame] | 1627 | static void __free_pages_ok(struct page *page, unsigned int order, |
| 1628 | fpi_t fpi_flags) |
KOSAKI Motohiro | ec95f53 | 2010-05-24 14:32:38 -0700 | [diff] [blame] | 1629 | { |
Mel Gorman | d34b073 | 2017-04-20 14:37:43 -0700 | [diff] [blame] | 1630 | unsigned long flags; |
Minchan Kim | 95e3441 | 2012-10-08 16:32:11 -0700 | [diff] [blame] | 1631 | int migratetype; |
Mel Gorman | dc4b0ca | 2014-06-04 16:10:17 -0700 | [diff] [blame] | 1632 | unsigned long pfn = page_to_pfn(page); |
Mel Gorman | 56f0e66 | 2021-06-28 19:41:57 -0700 | [diff] [blame] | 1633 | struct zone *zone = page_zone(page); |
KOSAKI Motohiro | ec95f53 | 2010-05-24 14:32:38 -0700 | [diff] [blame] | 1634 | |
Andrey Konovalov | 2c33568 | 2021-04-29 22:59:52 -0700 | [diff] [blame] | 1635 | if (!free_pages_prepare(page, order, true, fpi_flags)) |
KOSAKI Motohiro | ec95f53 | 2010-05-24 14:32:38 -0700 | [diff] [blame] | 1636 | return; |
| 1637 | |
Mel Gorman | cfc47a2 | 2014-06-04 16:10:19 -0700 | [diff] [blame] | 1638 | migratetype = get_pfnblock_migratetype(page, pfn); |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 1639 | |
Mel Gorman | 56f0e66 | 2021-06-28 19:41:57 -0700 | [diff] [blame] | 1640 | spin_lock_irqsave(&zone->lock, flags); |
Mel Gorman | 56f0e66 | 2021-06-28 19:41:57 -0700 | [diff] [blame] | 1641 | if (unlikely(has_isolate_pageblock(zone) || |
| 1642 | is_migrate_isolate(migratetype))) { |
| 1643 | migratetype = get_pfnblock_migratetype(page, pfn); |
| 1644 | } |
| 1645 | __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); |
| 1646 | spin_unlock_irqrestore(&zone->lock, flags); |
Mel Gorman | 9024999 | 2021-06-28 19:42:03 -0700 | [diff] [blame] | 1647 | |
Mel Gorman | d34b073 | 2017-04-20 14:37:43 -0700 | [diff] [blame] | 1648 | __count_vm_events(PGFREE, 1 << order); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1649 | } |
| 1650 | |
Arun KS | a9cd410 | 2019-03-05 15:42:14 -0800 | [diff] [blame] | 1651 | void __free_pages_core(struct page *page, unsigned int order) |
David Howells | a226f6c | 2006-01-06 00:11:08 -0800 | [diff] [blame] | 1652 | { |
Johannes Weiner | c399307 | 2012-01-10 15:08:10 -0800 | [diff] [blame] | 1653 | unsigned int nr_pages = 1 << order; |
Yinghai Lu | e2d0bd2 | 2013-09-11 14:20:37 -0700 | [diff] [blame] | 1654 | struct page *p = page; |
Johannes Weiner | c399307 | 2012-01-10 15:08:10 -0800 | [diff] [blame] | 1655 | unsigned int loop; |
David Howells | a226f6c | 2006-01-06 00:11:08 -0800 | [diff] [blame] | 1656 | |
David Hildenbrand | 7fef431 | 2020-10-15 20:09:35 -0700 | [diff] [blame] | 1657 | /* |
| 1658 | * When initializing the memmap, __init_single_page() sets the refcount |
| 1659 | * of all pages to 1 ("allocated"/"not free"). We have to set the |
| 1660 | * refcount of all involved pages to 0. |
| 1661 | */ |
Yinghai Lu | e2d0bd2 | 2013-09-11 14:20:37 -0700 | [diff] [blame] | 1662 | prefetchw(p); |
| 1663 | for (loop = 0; loop < (nr_pages - 1); loop++, p++) { |
| 1664 | prefetchw(p + 1); |
Johannes Weiner | c399307 | 2012-01-10 15:08:10 -0800 | [diff] [blame] | 1665 | __ClearPageReserved(p); |
| 1666 | set_page_count(p, 0); |
David Howells | a226f6c | 2006-01-06 00:11:08 -0800 | [diff] [blame] | 1667 | } |
Yinghai Lu | e2d0bd2 | 2013-09-11 14:20:37 -0700 | [diff] [blame] | 1668 | __ClearPageReserved(p); |
| 1669 | set_page_count(p, 0); |
Johannes Weiner | c399307 | 2012-01-10 15:08:10 -0800 | [diff] [blame] | 1670 | |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 1671 | atomic_long_add(nr_pages, &page_zone(page)->managed_pages); |
David Hildenbrand | 7fef431 | 2020-10-15 20:09:35 -0700 | [diff] [blame] | 1672 | |
| 1673 | /* |
| 1674 | * Bypass PCP and place fresh pages right to the tail, primarily |
| 1675 | * relevant for memory onlining. |
| 1676 | */ |
Andrey Konovalov | 2c33568 | 2021-04-29 22:59:52 -0700 | [diff] [blame] | 1677 | __free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON); |
David Howells | a226f6c | 2006-01-06 00:11:08 -0800 | [diff] [blame] | 1678 | } |
| 1679 | |
Mike Rapoport | a9ee6cf | 2021-06-28 19:43:01 -0700 | [diff] [blame] | 1680 | #ifdef CONFIG_NUMA |
Mel Gorman | 7ace991 | 2015-08-06 15:46:13 -0700 | [diff] [blame] | 1681 | |
Mike Rapoport | 03e92a5 | 2020-12-14 19:09:32 -0800 | [diff] [blame] | 1682 | /* |
| 1683 | * During memory init memblocks map pfns to nids. The search is expensive and |
| 1684 | * this caches recent lookups. The implementation of __early_pfn_to_nid |
| 1685 | * treats start/end as pfns. |
| 1686 | */ |
| 1687 | struct mminit_pfnnid_cache { |
| 1688 | unsigned long last_start; |
| 1689 | unsigned long last_end; |
| 1690 | int last_nid; |
| 1691 | }; |
Mel Gorman | 75a592a | 2015-06-30 14:56:59 -0700 | [diff] [blame] | 1692 | |
Mike Rapoport | 03e92a5 | 2020-12-14 19:09:32 -0800 | [diff] [blame] | 1693 | static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; |
Mike Rapoport | 6f24fbd | 2020-06-03 15:56:57 -0700 | [diff] [blame] | 1694 | |
| 1695 | /* |
| 1696 | * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. |
| 1697 | */ |
Mike Rapoport | 03e92a5 | 2020-12-14 19:09:32 -0800 | [diff] [blame] | 1698 | static int __meminit __early_pfn_to_nid(unsigned long pfn, |
Mike Rapoport | 6f24fbd | 2020-06-03 15:56:57 -0700 | [diff] [blame] | 1699 | struct mminit_pfnnid_cache *state) |
| 1700 | { |
| 1701 | unsigned long start_pfn, end_pfn; |
| 1702 | int nid; |
| 1703 | |
| 1704 | if (state->last_start <= pfn && pfn < state->last_end) |
| 1705 | return state->last_nid; |
| 1706 | |
| 1707 | nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); |
| 1708 | if (nid != NUMA_NO_NODE) { |
| 1709 | state->last_start = start_pfn; |
| 1710 | state->last_end = end_pfn; |
| 1711 | state->last_nid = nid; |
| 1712 | } |
| 1713 | |
| 1714 | return nid; |
| 1715 | } |
Mike Rapoport | 6f24fbd | 2020-06-03 15:56:57 -0700 | [diff] [blame] | 1716 | |
Mel Gorman | 75a592a | 2015-06-30 14:56:59 -0700 | [diff] [blame] | 1717 | int __meminit early_pfn_to_nid(unsigned long pfn) |
| 1718 | { |
Mel Gorman | 7ace991 | 2015-08-06 15:46:13 -0700 | [diff] [blame] | 1719 | static DEFINE_SPINLOCK(early_pfn_lock); |
Mel Gorman | 75a592a | 2015-06-30 14:56:59 -0700 | [diff] [blame] | 1720 | int nid; |
| 1721 | |
Mel Gorman | 7ace991 | 2015-08-06 15:46:13 -0700 | [diff] [blame] | 1722 | spin_lock(&early_pfn_lock); |
Mel Gorman | 75a592a | 2015-06-30 14:56:59 -0700 | [diff] [blame] | 1723 | nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); |
Mel Gorman | 7ace991 | 2015-08-06 15:46:13 -0700 | [diff] [blame] | 1724 | if (nid < 0) |
Mel Gorman | e4568d3 | 2016-07-14 12:07:20 -0700 | [diff] [blame] | 1725 | nid = first_online_node; |
Mel Gorman | 7ace991 | 2015-08-06 15:46:13 -0700 | [diff] [blame] | 1726 | spin_unlock(&early_pfn_lock); |
| 1727 | |
| 1728 | return nid; |
Mel Gorman | 75a592a | 2015-06-30 14:56:59 -0700 | [diff] [blame] | 1729 | } |
Mike Rapoport | a9ee6cf | 2021-06-28 19:43:01 -0700 | [diff] [blame] | 1730 | #endif /* CONFIG_NUMA */ |
Mel Gorman | 75a592a | 2015-06-30 14:56:59 -0700 | [diff] [blame] | 1731 | |
Mike Rapoport | 7c2ee34 | 2018-10-30 15:09:36 -0700 | [diff] [blame] | 1732 | void __init memblock_free_pages(struct page *page, unsigned long pfn, |
Mel Gorman | 3a80a7f | 2015-06-30 14:57:02 -0700 | [diff] [blame] | 1733 | unsigned int order) |
| 1734 | { |
| 1735 | if (early_page_uninitialised(pfn)) |
| 1736 | return; |
Arun KS | a9cd410 | 2019-03-05 15:42:14 -0800 | [diff] [blame] | 1737 | __free_pages_core(page, order); |
Mel Gorman | 3a80a7f | 2015-06-30 14:57:02 -0700 | [diff] [blame] | 1738 | } |
| 1739 | |
Joonsoo Kim | 7cf91a9 | 2016-03-15 14:57:51 -0700 | [diff] [blame] | 1740 | /* |
| 1741 | * Check that the whole (or subset of) a pageblock given by the interval of |
| 1742 | * [start_pfn, end_pfn) is valid and within the same zone, before scanning it |
Mike Rapoport | 859a85d | 2021-09-07 19:54:52 -0700 | [diff] [blame] | 1743 | * with the migration of free compaction scanner. |
Joonsoo Kim | 7cf91a9 | 2016-03-15 14:57:51 -0700 | [diff] [blame] | 1744 | * |
| 1745 | * Return struct page pointer of start_pfn, or NULL if checks were not passed. |
| 1746 | * |
| 1747 | * It's possible on some configurations to have a setup like node0 node1 node0 |
| 1748 | * i.e. it's possible that all pages within a zones range of pages do not |
| 1749 | * belong to a single zone. We assume that a border between node0 and node1 |
| 1750 | * can occur within a single pageblock, but not a node0 node1 node0 |
| 1751 | * interleaving within a single pageblock. It is therefore sufficient to check |
| 1752 | * the first and last page of a pageblock and avoid checking each individual |
| 1753 | * page in a pageblock. |
| 1754 | */ |
| 1755 | struct page *__pageblock_pfn_to_page(unsigned long start_pfn, |
| 1756 | unsigned long end_pfn, struct zone *zone) |
| 1757 | { |
| 1758 | struct page *start_page; |
| 1759 | struct page *end_page; |
| 1760 | |
| 1761 | /* end_pfn is one past the range we are checking */ |
| 1762 | end_pfn--; |
| 1763 | |
| 1764 | if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn)) |
| 1765 | return NULL; |
| 1766 | |
Michal Hocko | 2d070ea | 2017-07-06 15:37:56 -0700 | [diff] [blame] | 1767 | start_page = pfn_to_online_page(start_pfn); |
| 1768 | if (!start_page) |
| 1769 | return NULL; |
Joonsoo Kim | 7cf91a9 | 2016-03-15 14:57:51 -0700 | [diff] [blame] | 1770 | |
| 1771 | if (page_zone(start_page) != zone) |
| 1772 | return NULL; |
| 1773 | |
| 1774 | end_page = pfn_to_page(end_pfn); |
| 1775 | |
| 1776 | /* This gives a shorter code than deriving page_zone(end_page) */ |
| 1777 | if (page_zone_id(start_page) != page_zone_id(end_page)) |
| 1778 | return NULL; |
| 1779 | |
| 1780 | return start_page; |
| 1781 | } |
| 1782 | |
| 1783 | void set_zone_contiguous(struct zone *zone) |
| 1784 | { |
| 1785 | unsigned long block_start_pfn = zone->zone_start_pfn; |
| 1786 | unsigned long block_end_pfn; |
| 1787 | |
| 1788 | block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages); |
| 1789 | for (; block_start_pfn < zone_end_pfn(zone); |
| 1790 | block_start_pfn = block_end_pfn, |
| 1791 | block_end_pfn += pageblock_nr_pages) { |
| 1792 | |
| 1793 | block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); |
| 1794 | |
| 1795 | if (!__pageblock_pfn_to_page(block_start_pfn, |
| 1796 | block_end_pfn, zone)) |
| 1797 | return; |
David Hildenbrand | e84fe99 | 2020-05-07 18:35:46 -0700 | [diff] [blame] | 1798 | cond_resched(); |
Joonsoo Kim | 7cf91a9 | 2016-03-15 14:57:51 -0700 | [diff] [blame] | 1799 | } |
| 1800 | |
| 1801 | /* We confirm that there is no hole */ |
| 1802 | zone->contiguous = true; |
| 1803 | } |
| 1804 | |
| 1805 | void clear_zone_contiguous(struct zone *zone) |
| 1806 | { |
| 1807 | zone->contiguous = false; |
| 1808 | } |
| 1809 | |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 1810 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1811 | static void __init deferred_free_range(unsigned long pfn, |
| 1812 | unsigned long nr_pages) |
Mel Gorman | a4de83d | 2015-06-30 14:57:16 -0700 | [diff] [blame] | 1813 | { |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1814 | struct page *page; |
| 1815 | unsigned long i; |
Mel Gorman | a4de83d | 2015-06-30 14:57:16 -0700 | [diff] [blame] | 1816 | |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1817 | if (!nr_pages) |
Mel Gorman | a4de83d | 2015-06-30 14:57:16 -0700 | [diff] [blame] | 1818 | return; |
| 1819 | |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1820 | page = pfn_to_page(pfn); |
| 1821 | |
Mel Gorman | a4de83d | 2015-06-30 14:57:16 -0700 | [diff] [blame] | 1822 | /* Free a large naturally-aligned chunk if possible */ |
Xishi Qiu | e780149 | 2016-10-07 16:58:09 -0700 | [diff] [blame] | 1823 | if (nr_pages == pageblock_nr_pages && |
| 1824 | (pfn & (pageblock_nr_pages - 1)) == 0) { |
Mel Gorman | ac5d253 | 2015-06-30 14:57:20 -0700 | [diff] [blame] | 1825 | set_pageblock_migratetype(page, MIGRATE_MOVABLE); |
Arun KS | a9cd410 | 2019-03-05 15:42:14 -0800 | [diff] [blame] | 1826 | __free_pages_core(page, pageblock_order); |
Mel Gorman | a4de83d | 2015-06-30 14:57:16 -0700 | [diff] [blame] | 1827 | return; |
| 1828 | } |
| 1829 | |
Xishi Qiu | e780149 | 2016-10-07 16:58:09 -0700 | [diff] [blame] | 1830 | for (i = 0; i < nr_pages; i++, page++, pfn++) { |
| 1831 | if ((pfn & (pageblock_nr_pages - 1)) == 0) |
| 1832 | set_pageblock_migratetype(page, MIGRATE_MOVABLE); |
Arun KS | a9cd410 | 2019-03-05 15:42:14 -0800 | [diff] [blame] | 1833 | __free_pages_core(page, 0); |
Xishi Qiu | e780149 | 2016-10-07 16:58:09 -0700 | [diff] [blame] | 1834 | } |
Mel Gorman | a4de83d | 2015-06-30 14:57:16 -0700 | [diff] [blame] | 1835 | } |
| 1836 | |
Nicolai Stange | d3cd131 | 2015-08-06 15:46:16 -0700 | [diff] [blame] | 1837 | /* Completion tracking for deferred_init_memmap() threads */ |
| 1838 | static atomic_t pgdat_init_n_undone __initdata; |
| 1839 | static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp); |
| 1840 | |
| 1841 | static inline void __init pgdat_init_report_one_done(void) |
| 1842 | { |
| 1843 | if (atomic_dec_and_test(&pgdat_init_n_undone)) |
| 1844 | complete(&pgdat_init_all_done_comp); |
| 1845 | } |
Mel Gorman | 0e1cc95 | 2015-06-30 14:57:27 -0700 | [diff] [blame] | 1846 | |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1847 | /* |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1848 | * Returns true if page needs to be initialized or freed to buddy allocator. |
| 1849 | * |
| 1850 | * First we check if pfn is valid on architectures where it is possible to have |
| 1851 | * holes within pageblock_nr_pages. On systems where it is not possible, this |
| 1852 | * function is optimized out. |
| 1853 | * |
| 1854 | * Then, we check if a current large page is valid by only checking the validity |
| 1855 | * of the head pfn. |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1856 | */ |
Alexander Duyck | 56ec43d | 2019-05-13 17:21:13 -0700 | [diff] [blame] | 1857 | static inline bool __init deferred_pfn_valid(unsigned long pfn) |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1858 | { |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1859 | if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn)) |
| 1860 | return false; |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1861 | return true; |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1862 | } |
| 1863 | |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1864 | /* |
| 1865 | * Free pages to buddy allocator. Try to free aligned pages in |
| 1866 | * pageblock_nr_pages sizes. |
| 1867 | */ |
Alexander Duyck | 56ec43d | 2019-05-13 17:21:13 -0700 | [diff] [blame] | 1868 | static void __init deferred_free_pages(unsigned long pfn, |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1869 | unsigned long end_pfn) |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1870 | { |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1871 | unsigned long nr_pgmask = pageblock_nr_pages - 1; |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1872 | unsigned long nr_free = 0; |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1873 | |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1874 | for (; pfn < end_pfn; pfn++) { |
Alexander Duyck | 56ec43d | 2019-05-13 17:21:13 -0700 | [diff] [blame] | 1875 | if (!deferred_pfn_valid(pfn)) { |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1876 | deferred_free_range(pfn - nr_free, nr_free); |
| 1877 | nr_free = 0; |
| 1878 | } else if (!(pfn & nr_pgmask)) { |
| 1879 | deferred_free_range(pfn - nr_free, nr_free); |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1880 | nr_free = 1; |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1881 | } else { |
| 1882 | nr_free++; |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1883 | } |
| 1884 | } |
| 1885 | /* Free the last block of pages to allocator */ |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1886 | deferred_free_range(pfn - nr_free, nr_free); |
| 1887 | } |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1888 | |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1889 | /* |
| 1890 | * Initialize struct pages. We minimize pfn page lookups and scheduler checks |
| 1891 | * by performing it only once every pageblock_nr_pages. |
| 1892 | * Return number of pages initialized. |
| 1893 | */ |
Alexander Duyck | 56ec43d | 2019-05-13 17:21:13 -0700 | [diff] [blame] | 1894 | static unsigned long __init deferred_init_pages(struct zone *zone, |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1895 | unsigned long pfn, |
| 1896 | unsigned long end_pfn) |
| 1897 | { |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1898 | unsigned long nr_pgmask = pageblock_nr_pages - 1; |
Alexander Duyck | 56ec43d | 2019-05-13 17:21:13 -0700 | [diff] [blame] | 1899 | int nid = zone_to_nid(zone); |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1900 | unsigned long nr_pages = 0; |
Alexander Duyck | 56ec43d | 2019-05-13 17:21:13 -0700 | [diff] [blame] | 1901 | int zid = zone_idx(zone); |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1902 | struct page *page = NULL; |
| 1903 | |
| 1904 | for (; pfn < end_pfn; pfn++) { |
Alexander Duyck | 56ec43d | 2019-05-13 17:21:13 -0700 | [diff] [blame] | 1905 | if (!deferred_pfn_valid(pfn)) { |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1906 | page = NULL; |
| 1907 | continue; |
| 1908 | } else if (!page || !(pfn & nr_pgmask)) { |
| 1909 | page = pfn_to_page(pfn); |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1910 | } else { |
| 1911 | page++; |
| 1912 | } |
Pavel Tatashin | d0dc12e | 2018-04-05 16:23:00 -0700 | [diff] [blame] | 1913 | __init_single_page(page, pfn, zid, nid); |
Pavel Tatashin | 80b1f41 | 2018-01-31 16:16:30 -0800 | [diff] [blame] | 1914 | nr_pages++; |
| 1915 | } |
| 1916 | return (nr_pages); |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 1917 | } |
| 1918 | |
Alexander Duyck | 0e56aca | 2019-05-13 17:21:20 -0700 | [diff] [blame] | 1919 | /* |
| 1920 | * This function is meant to pre-load the iterator for the zone init. |
| 1921 | * Specifically it walks through the ranges until we are caught up to the |
| 1922 | * first_init_pfn value and exits there. If we never encounter the value we |
| 1923 | * return false indicating there are no valid ranges left. |
| 1924 | */ |
| 1925 | static bool __init |
| 1926 | deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, |
| 1927 | unsigned long *spfn, unsigned long *epfn, |
| 1928 | unsigned long first_init_pfn) |
| 1929 | { |
| 1930 | u64 j; |
| 1931 | |
| 1932 | /* |
| 1933 | * Start out by walking through the ranges in this zone that have |
| 1934 | * already been initialized. We don't need to do anything with them |
| 1935 | * so we just need to flush them out of the system. |
| 1936 | */ |
| 1937 | for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { |
| 1938 | if (*epfn <= first_init_pfn) |
| 1939 | continue; |
| 1940 | if (*spfn < first_init_pfn) |
| 1941 | *spfn = first_init_pfn; |
| 1942 | *i = j; |
| 1943 | return true; |
| 1944 | } |
| 1945 | |
| 1946 | return false; |
| 1947 | } |
| 1948 | |
| 1949 | /* |
| 1950 | * Initialize and free pages. We do it in two loops: first we initialize |
| 1951 | * struct page, then free to buddy allocator, because while we are |
| 1952 | * freeing pages we can access pages that are ahead (computing buddy |
| 1953 | * page in __free_one_page()). |
| 1954 | * |
| 1955 | * In order to try and keep some memory in the cache we have the loop |
| 1956 | * broken along max page order boundaries. This way we will not cause |
| 1957 | * any issues with the buddy page computation. |
| 1958 | */ |
| 1959 | static unsigned long __init |
| 1960 | deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, |
| 1961 | unsigned long *end_pfn) |
| 1962 | { |
| 1963 | unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES); |
| 1964 | unsigned long spfn = *start_pfn, epfn = *end_pfn; |
| 1965 | unsigned long nr_pages = 0; |
| 1966 | u64 j = *i; |
| 1967 | |
| 1968 | /* First we loop through and initialize the page values */ |
| 1969 | for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { |
| 1970 | unsigned long t; |
| 1971 | |
| 1972 | if (mo_pfn <= *start_pfn) |
| 1973 | break; |
| 1974 | |
| 1975 | t = min(mo_pfn, *end_pfn); |
| 1976 | nr_pages += deferred_init_pages(zone, *start_pfn, t); |
| 1977 | |
| 1978 | if (mo_pfn < *end_pfn) { |
| 1979 | *start_pfn = mo_pfn; |
| 1980 | break; |
| 1981 | } |
| 1982 | } |
| 1983 | |
| 1984 | /* Reset values and now loop through freeing pages as needed */ |
| 1985 | swap(j, *i); |
| 1986 | |
| 1987 | for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { |
| 1988 | unsigned long t; |
| 1989 | |
| 1990 | if (mo_pfn <= spfn) |
| 1991 | break; |
| 1992 | |
| 1993 | t = min(mo_pfn, epfn); |
| 1994 | deferred_free_pages(spfn, t); |
| 1995 | |
| 1996 | if (mo_pfn <= epfn) |
| 1997 | break; |
| 1998 | } |
| 1999 | |
| 2000 | return nr_pages; |
| 2001 | } |
| 2002 | |
Daniel Jordan | e444314 | 2020-06-03 15:59:51 -0700 | [diff] [blame] | 2003 | static void __init |
| 2004 | deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, |
| 2005 | void *arg) |
| 2006 | { |
| 2007 | unsigned long spfn, epfn; |
| 2008 | struct zone *zone = arg; |
| 2009 | u64 i; |
| 2010 | |
| 2011 | deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); |
| 2012 | |
| 2013 | /* |
| 2014 | * Initialize and free pages in MAX_ORDER sized increments so that we |
| 2015 | * can avoid introducing any issues with the buddy allocator. |
| 2016 | */ |
| 2017 | while (spfn < end_pfn) { |
| 2018 | deferred_init_maxorder(&i, zone, &spfn, &epfn); |
| 2019 | cond_resched(); |
| 2020 | } |
| 2021 | } |
| 2022 | |
Daniel Jordan | ecd0965 | 2020-06-03 15:59:55 -0700 | [diff] [blame] | 2023 | /* An arch may override for more concurrency. */ |
| 2024 | __weak int __init |
| 2025 | deferred_page_init_max_threads(const struct cpumask *node_cpumask) |
| 2026 | { |
| 2027 | return 1; |
| 2028 | } |
| 2029 | |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 2030 | /* Initialise remaining memory on a node */ |
Mel Gorman | 0e1cc95 | 2015-06-30 14:57:27 -0700 | [diff] [blame] | 2031 | static int __init deferred_init_memmap(void *data) |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 2032 | { |
Mel Gorman | 0e1cc95 | 2015-06-30 14:57:27 -0700 | [diff] [blame] | 2033 | pg_data_t *pgdat = data; |
Mel Gorman | 0e1cc95 | 2015-06-30 14:57:27 -0700 | [diff] [blame] | 2034 | const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); |
Daniel Jordan | 89c7c40 | 2020-06-03 15:59:47 -0700 | [diff] [blame] | 2035 | unsigned long spfn = 0, epfn = 0; |
Alexander Duyck | 0e56aca | 2019-05-13 17:21:20 -0700 | [diff] [blame] | 2036 | unsigned long first_init_pfn, flags; |
| 2037 | unsigned long start = jiffies; |
| 2038 | struct zone *zone; |
Daniel Jordan | e444314 | 2020-06-03 15:59:51 -0700 | [diff] [blame] | 2039 | int zid, max_threads; |
Pavel Tatashin | 2f47a91 | 2017-11-15 17:36:09 -0800 | [diff] [blame] | 2040 | u64 i; |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 2041 | |
Mel Gorman | 0e1cc95 | 2015-06-30 14:57:27 -0700 | [diff] [blame] | 2042 | /* Bind memory initialisation thread to a local node if possible */ |
| 2043 | if (!cpumask_empty(cpumask)) |
| 2044 | set_cpus_allowed_ptr(current, cpumask); |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 2045 | |
Pavel Tatashin | 3a2d7fa | 2018-04-05 16:22:27 -0700 | [diff] [blame] | 2046 | pgdat_resize_lock(pgdat, &flags); |
| 2047 | first_init_pfn = pgdat->first_deferred_pfn; |
| 2048 | if (first_init_pfn == ULONG_MAX) { |
| 2049 | pgdat_resize_unlock(pgdat, &flags); |
| 2050 | pgdat_init_report_one_done(); |
| 2051 | return 0; |
| 2052 | } |
| 2053 | |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 2054 | /* Sanity check boundaries */ |
| 2055 | BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); |
| 2056 | BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); |
| 2057 | pgdat->first_deferred_pfn = ULONG_MAX; |
| 2058 | |
Pavel Tatashin | 3d06085 | 2020-06-03 15:59:24 -0700 | [diff] [blame] | 2059 | /* |
| 2060 | * Once we unlock here, the zone cannot be grown anymore, thus if an |
| 2061 | * interrupt thread must allocate this early in boot, zone must be |
| 2062 | * pre-grown prior to start of deferred page initialization. |
| 2063 | */ |
| 2064 | pgdat_resize_unlock(pgdat, &flags); |
| 2065 | |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 2066 | /* Only the highest zone is deferred so find it */ |
| 2067 | for (zid = 0; zid < MAX_NR_ZONES; zid++) { |
| 2068 | zone = pgdat->node_zones + zid; |
| 2069 | if (first_init_pfn < zone_end_pfn(zone)) |
| 2070 | break; |
| 2071 | } |
Alexander Duyck | 0e56aca | 2019-05-13 17:21:20 -0700 | [diff] [blame] | 2072 | |
| 2073 | /* If the zone is empty somebody else may have cleared out the zone */ |
| 2074 | if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, |
| 2075 | first_init_pfn)) |
| 2076 | goto zone_empty; |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 2077 | |
Daniel Jordan | ecd0965 | 2020-06-03 15:59:55 -0700 | [diff] [blame] | 2078 | max_threads = deferred_page_init_max_threads(cpumask); |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 2079 | |
Daniel Jordan | 117003c | 2020-06-03 15:59:20 -0700 | [diff] [blame] | 2080 | while (spfn < epfn) { |
Daniel Jordan | e444314 | 2020-06-03 15:59:51 -0700 | [diff] [blame] | 2081 | unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION); |
| 2082 | struct padata_mt_job job = { |
| 2083 | .thread_fn = deferred_init_memmap_chunk, |
| 2084 | .fn_arg = zone, |
| 2085 | .start = spfn, |
| 2086 | .size = epfn_align - spfn, |
| 2087 | .align = PAGES_PER_SECTION, |
| 2088 | .min_chunk = PAGES_PER_SECTION, |
| 2089 | .max_threads = max_threads, |
| 2090 | }; |
| 2091 | |
| 2092 | padata_do_multithreaded(&job); |
| 2093 | deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, |
| 2094 | epfn_align); |
Daniel Jordan | 117003c | 2020-06-03 15:59:20 -0700 | [diff] [blame] | 2095 | } |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 2096 | zone_empty: |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 2097 | /* Sanity check that the next zone really is unpopulated */ |
| 2098 | WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); |
| 2099 | |
Daniel Jordan | 89c7c40 | 2020-06-03 15:59:47 -0700 | [diff] [blame] | 2100 | pr_info("node %d deferred pages initialised in %ums\n", |
| 2101 | pgdat->node_id, jiffies_to_msecs(jiffies - start)); |
Nicolai Stange | d3cd131 | 2015-08-06 15:46:16 -0700 | [diff] [blame] | 2102 | |
| 2103 | pgdat_init_report_one_done(); |
Mel Gorman | 0e1cc95 | 2015-06-30 14:57:27 -0700 | [diff] [blame] | 2104 | return 0; |
| 2105 | } |
Pavel Tatashin | c9e97a1 | 2018-04-05 16:22:31 -0700 | [diff] [blame] | 2106 | |
| 2107 | /* |
Pavel Tatashin | c9e97a1 | 2018-04-05 16:22:31 -0700 | [diff] [blame] | 2108 | * If this zone has deferred pages, try to grow it by initializing enough |
| 2109 | * deferred pages to satisfy the allocation specified by order, rounded up to |
| 2110 | * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments |
| 2111 | * of SECTION_SIZE bytes by initializing struct pages in increments of |
| 2112 | * PAGES_PER_SECTION * sizeof(struct page) bytes. |
| 2113 | * |
| 2114 | * Return true when zone was grown, otherwise return false. We return true even |
| 2115 | * when we grow less than requested, to let the caller decide if there are |
| 2116 | * enough pages to satisfy the allocation. |
| 2117 | * |
| 2118 | * Note: We use noinline because this function is needed only during boot, and |
| 2119 | * it is called from a __ref function _deferred_grow_zone. This way we are |
| 2120 | * making sure that it is not inlined into permanent text section. |
| 2121 | */ |
| 2122 | static noinline bool __init |
| 2123 | deferred_grow_zone(struct zone *zone, unsigned int order) |
| 2124 | { |
Pavel Tatashin | c9e97a1 | 2018-04-05 16:22:31 -0700 | [diff] [blame] | 2125 | unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); |
Alexander Duyck | 837566e | 2019-05-13 17:21:17 -0700 | [diff] [blame] | 2126 | pg_data_t *pgdat = zone->zone_pgdat; |
Pavel Tatashin | c9e97a1 | 2018-04-05 16:22:31 -0700 | [diff] [blame] | 2127 | unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; |
Alexander Duyck | 0e56aca | 2019-05-13 17:21:20 -0700 | [diff] [blame] | 2128 | unsigned long spfn, epfn, flags; |
| 2129 | unsigned long nr_pages = 0; |
Pavel Tatashin | c9e97a1 | 2018-04-05 16:22:31 -0700 | [diff] [blame] | 2130 | u64 i; |
| 2131 | |
| 2132 | /* Only the last zone may have deferred pages */ |
| 2133 | if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) |
| 2134 | return false; |
| 2135 | |
| 2136 | pgdat_resize_lock(pgdat, &flags); |
| 2137 | |
| 2138 | /* |
Pavel Tatashin | c9e97a1 | 2018-04-05 16:22:31 -0700 | [diff] [blame] | 2139 | * If someone grew this zone while we were waiting for spinlock, return |
| 2140 | * true, as there might be enough pages already. |
| 2141 | */ |
| 2142 | if (first_deferred_pfn != pgdat->first_deferred_pfn) { |
| 2143 | pgdat_resize_unlock(pgdat, &flags); |
| 2144 | return true; |
| 2145 | } |
| 2146 | |
Alexander Duyck | 0e56aca | 2019-05-13 17:21:20 -0700 | [diff] [blame] | 2147 | /* If the zone is empty somebody else may have cleared out the zone */ |
| 2148 | if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, |
| 2149 | first_deferred_pfn)) { |
| 2150 | pgdat->first_deferred_pfn = ULONG_MAX; |
Pavel Tatashin | c9e97a1 | 2018-04-05 16:22:31 -0700 | [diff] [blame] | 2151 | pgdat_resize_unlock(pgdat, &flags); |
Juergen Gross | b9705d8 | 2019-07-04 15:14:36 -0700 | [diff] [blame] | 2152 | /* Retry only once. */ |
| 2153 | return first_deferred_pfn != ULONG_MAX; |
Pavel Tatashin | c9e97a1 | 2018-04-05 16:22:31 -0700 | [diff] [blame] | 2154 | } |
| 2155 | |
Alexander Duyck | 0e56aca | 2019-05-13 17:21:20 -0700 | [diff] [blame] | 2156 | /* |
| 2157 | * Initialize and free pages in MAX_ORDER sized increments so |
| 2158 | * that we can avoid introducing any issues with the buddy |
| 2159 | * allocator. |
| 2160 | */ |
| 2161 | while (spfn < epfn) { |
| 2162 | /* update our first deferred PFN for this section */ |
| 2163 | first_deferred_pfn = spfn; |
Pavel Tatashin | c9e97a1 | 2018-04-05 16:22:31 -0700 | [diff] [blame] | 2164 | |
Alexander Duyck | 0e56aca | 2019-05-13 17:21:20 -0700 | [diff] [blame] | 2165 | nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); |
Daniel Jordan | 117003c | 2020-06-03 15:59:20 -0700 | [diff] [blame] | 2166 | touch_nmi_watchdog(); |
Pavel Tatashin | c9e97a1 | 2018-04-05 16:22:31 -0700 | [diff] [blame] | 2167 | |
Alexander Duyck | 0e56aca | 2019-05-13 17:21:20 -0700 | [diff] [blame] | 2168 | /* We should only stop along section boundaries */ |
| 2169 | if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) |
| 2170 | continue; |
| 2171 | |
| 2172 | /* If our quota has been met we can stop here */ |
Pavel Tatashin | c9e97a1 | 2018-04-05 16:22:31 -0700 | [diff] [blame] | 2173 | if (nr_pages >= nr_pages_needed) |
| 2174 | break; |
| 2175 | } |
| 2176 | |
Alexander Duyck | 0e56aca | 2019-05-13 17:21:20 -0700 | [diff] [blame] | 2177 | pgdat->first_deferred_pfn = spfn; |
Pavel Tatashin | c9e97a1 | 2018-04-05 16:22:31 -0700 | [diff] [blame] | 2178 | pgdat_resize_unlock(pgdat, &flags); |
| 2179 | |
| 2180 | return nr_pages > 0; |
| 2181 | } |
| 2182 | |
| 2183 | /* |
| 2184 | * deferred_grow_zone() is __init, but it is called from |
| 2185 | * get_page_from_freelist() during early boot until deferred_pages permanently |
| 2186 | * disables this call. This is why we have refdata wrapper to avoid warning, |
| 2187 | * and to ensure that the function body gets unloaded. |
| 2188 | */ |
| 2189 | static bool __ref |
| 2190 | _deferred_grow_zone(struct zone *zone, unsigned int order) |
| 2191 | { |
| 2192 | return deferred_grow_zone(zone, order); |
| 2193 | } |
| 2194 | |
Joonsoo Kim | 7cf91a9 | 2016-03-15 14:57:51 -0700 | [diff] [blame] | 2195 | #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ |
Mel Gorman | 0e1cc95 | 2015-06-30 14:57:27 -0700 | [diff] [blame] | 2196 | |
| 2197 | void __init page_alloc_init_late(void) |
| 2198 | { |
Joonsoo Kim | 7cf91a9 | 2016-03-15 14:57:51 -0700 | [diff] [blame] | 2199 | struct zone *zone; |
Dan Williams | e900a91 | 2019-05-14 15:41:28 -0700 | [diff] [blame] | 2200 | int nid; |
Joonsoo Kim | 7cf91a9 | 2016-03-15 14:57:51 -0700 | [diff] [blame] | 2201 | |
| 2202 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
Mel Gorman | 0e1cc95 | 2015-06-30 14:57:27 -0700 | [diff] [blame] | 2203 | |
Nicolai Stange | d3cd131 | 2015-08-06 15:46:16 -0700 | [diff] [blame] | 2204 | /* There will be num_node_state(N_MEMORY) threads */ |
| 2205 | atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); |
Mel Gorman | 0e1cc95 | 2015-06-30 14:57:27 -0700 | [diff] [blame] | 2206 | for_each_node_state(nid, N_MEMORY) { |
Mel Gorman | 0e1cc95 | 2015-06-30 14:57:27 -0700 | [diff] [blame] | 2207 | kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); |
| 2208 | } |
| 2209 | |
| 2210 | /* Block until all are initialised */ |
Nicolai Stange | d3cd131 | 2015-08-06 15:46:16 -0700 | [diff] [blame] | 2211 | wait_for_completion(&pgdat_init_all_done_comp); |
Mel Gorman | 4248b0d | 2015-08-06 15:46:20 -0700 | [diff] [blame] | 2212 | |
Pavel Tatashin | c9e97a1 | 2018-04-05 16:22:31 -0700 | [diff] [blame] | 2213 | /* |
| 2214 | * We initialized the rest of the deferred pages. Permanently disable |
| 2215 | * on-demand struct page initialization. |
| 2216 | */ |
| 2217 | static_branch_disable(&deferred_pages); |
| 2218 | |
Mel Gorman | 4248b0d | 2015-08-06 15:46:20 -0700 | [diff] [blame] | 2219 | /* Reinit limits that are based on free pages after the kernel is up */ |
| 2220 | files_maxfiles_init(); |
Joonsoo Kim | 7cf91a9 | 2016-03-15 14:57:51 -0700 | [diff] [blame] | 2221 | #endif |
Mike Rapoport | 350e88b | 2019-05-13 17:22:59 -0700 | [diff] [blame] | 2222 | |
Lin Feng | ba8f358 | 2020-12-14 19:11:19 -0800 | [diff] [blame] | 2223 | buffer_init(); |
| 2224 | |
Pavel Tatashin | 3010f87 | 2017-08-18 15:16:05 -0700 | [diff] [blame] | 2225 | /* Discard memblock private memory */ |
| 2226 | memblock_discard(); |
Joonsoo Kim | 7cf91a9 | 2016-03-15 14:57:51 -0700 | [diff] [blame] | 2227 | |
Dan Williams | e900a91 | 2019-05-14 15:41:28 -0700 | [diff] [blame] | 2228 | for_each_node_state(nid, N_MEMORY) |
| 2229 | shuffle_free_memory(NODE_DATA(nid)); |
| 2230 | |
Joonsoo Kim | 7cf91a9 | 2016-03-15 14:57:51 -0700 | [diff] [blame] | 2231 | for_each_populated_zone(zone) |
| 2232 | set_zone_contiguous(zone); |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 2233 | } |
Mel Gorman | 7e18adb | 2015-06-30 14:57:05 -0700 | [diff] [blame] | 2234 | |
Michal Nazarewicz | 47118af | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 2235 | #ifdef CONFIG_CMA |
Li Zhong | 9cf510a | 2013-08-23 13:52:52 +0800 | [diff] [blame] | 2236 | /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ |
Michal Nazarewicz | 47118af | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 2237 | void __init init_cma_reserved_pageblock(struct page *page) |
| 2238 | { |
| 2239 | unsigned i = pageblock_nr_pages; |
| 2240 | struct page *p = page; |
| 2241 | |
| 2242 | do { |
| 2243 | __ClearPageReserved(p); |
| 2244 | set_page_count(p, 0); |
Joonsoo Kim | d883c6c | 2018-05-23 10:18:21 +0900 | [diff] [blame] | 2245 | } while (++p, --i); |
Michal Nazarewicz | 47118af | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 2246 | |
Michal Nazarewicz | 47118af | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 2247 | set_pageblock_migratetype(page, MIGRATE_CMA); |
Michal Nazarewicz | dc78327 | 2014-07-02 15:22:35 -0700 | [diff] [blame] | 2248 | |
| 2249 | if (pageblock_order >= MAX_ORDER) { |
| 2250 | i = pageblock_nr_pages; |
| 2251 | p = page; |
| 2252 | do { |
| 2253 | set_page_refcounted(p); |
| 2254 | __free_pages(p, MAX_ORDER - 1); |
| 2255 | p += MAX_ORDER_NR_PAGES; |
| 2256 | } while (i -= MAX_ORDER_NR_PAGES); |
| 2257 | } else { |
| 2258 | set_page_refcounted(page); |
| 2259 | __free_pages(page, pageblock_order); |
| 2260 | } |
| 2261 | |
Jiang Liu | 3dcc057 | 2013-07-03 15:03:21 -0700 | [diff] [blame] | 2262 | adjust_managed_page_count(page, pageblock_nr_pages); |
David Hildenbrand | 3c381db | 2021-02-25 17:16:40 -0800 | [diff] [blame] | 2263 | page_zone(page)->cma_pages += pageblock_nr_pages; |
Michal Nazarewicz | 47118af | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 2264 | } |
| 2265 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2266 | |
| 2267 | /* |
| 2268 | * The order of subdivision here is critical for the IO subsystem. |
| 2269 | * Please do not alter this order without good reasons and regression |
| 2270 | * testing. Specifically, as large blocks of memory are subdivided, |
| 2271 | * the order in which smaller blocks are delivered depends on the order |
| 2272 | * they're subdivided in this function. This is the primary factor |
| 2273 | * influencing the order in which pages are delivered to the IO |
| 2274 | * subsystem according to empirical testing, and this is also justified |
| 2275 | * by considering the behavior of a buddy system containing a single |
| 2276 | * large block of memory acted on by a series of small allocations. |
| 2277 | * This behavior is a critical factor in sglist merging's success. |
| 2278 | * |
Nadia Yvette Chambers | 6d49e35 | 2012-12-06 10:39:54 +0100 | [diff] [blame] | 2279 | * -- nyc |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2280 | */ |
Nick Piggin | 085cc7d5 | 2006-01-06 00:11:01 -0800 | [diff] [blame] | 2281 | static inline void expand(struct zone *zone, struct page *page, |
Alexander Duyck | 6ab0136 | 2020-04-06 20:04:49 -0700 | [diff] [blame] | 2282 | int low, int high, int migratetype) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2283 | { |
| 2284 | unsigned long size = 1 << high; |
| 2285 | |
| 2286 | while (high > low) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2287 | high--; |
| 2288 | size >>= 1; |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 2289 | VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); |
Stanislaw Gruszka | c0a32fc | 2012-01-10 15:07:28 -0800 | [diff] [blame] | 2290 | |
Joonsoo Kim | acbc15a | 2016-10-07 16:58:15 -0700 | [diff] [blame] | 2291 | /* |
| 2292 | * Mark as guard pages (or page), that will allow to |
| 2293 | * merge back to allocator when buddy will be freed. |
| 2294 | * Corresponding page table entries will not be touched, |
| 2295 | * pages will stay not present in virtual address space |
| 2296 | */ |
| 2297 | if (set_page_guard(zone, &page[size], high, migratetype)) |
Stanislaw Gruszka | c0a32fc | 2012-01-10 15:07:28 -0800 | [diff] [blame] | 2298 | continue; |
Joonsoo Kim | acbc15a | 2016-10-07 16:58:15 -0700 | [diff] [blame] | 2299 | |
Alexander Duyck | 6ab0136 | 2020-04-06 20:04:49 -0700 | [diff] [blame] | 2300 | add_to_free_list(&page[size], zone, high, migratetype); |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 2301 | set_buddy_order(&page[size], high); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2302 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2303 | } |
| 2304 | |
Vlastimil Babka | 4e61180 | 2016-05-19 17:14:41 -0700 | [diff] [blame] | 2305 | static void check_new_page_bad(struct page *page) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2306 | { |
Naoya Horiguchi | f4c18e6 | 2015-08-06 15:47:08 -0700 | [diff] [blame] | 2307 | if (unlikely(page->flags & __PG_HWPOISON)) { |
Naoya Horiguchi | e570f56 | 2016-05-20 16:58:50 -0700 | [diff] [blame] | 2308 | /* Don't complain about hwpoisoned pages */ |
| 2309 | page_mapcount_reset(page); /* remove PageBuddy */ |
| 2310 | return; |
Naoya Horiguchi | f4c18e6 | 2015-08-06 15:47:08 -0700 | [diff] [blame] | 2311 | } |
Wei Yang | 58b7f11 | 2020-06-03 15:58:39 -0700 | [diff] [blame] | 2312 | |
| 2313 | bad_page(page, |
| 2314 | page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); |
Vlastimil Babka | 4e61180 | 2016-05-19 17:14:41 -0700 | [diff] [blame] | 2315 | } |
| 2316 | |
| 2317 | /* |
| 2318 | * This page is about to be returned from the page allocator |
| 2319 | */ |
| 2320 | static inline int check_new_page(struct page *page) |
| 2321 | { |
| 2322 | if (likely(page_expected_state(page, |
| 2323 | PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) |
| 2324 | return 0; |
| 2325 | |
| 2326 | check_new_page_bad(page); |
| 2327 | return 1; |
Wu Fengguang | 2a7684a | 2009-09-16 11:50:12 +0200 | [diff] [blame] | 2328 | } |
| 2329 | |
Mel Gorman | 479f854 | 2016-05-19 17:14:35 -0700 | [diff] [blame] | 2330 | #ifdef CONFIG_DEBUG_VM |
Vlastimil Babka | 4462b32 | 2019-07-11 20:55:09 -0700 | [diff] [blame] | 2331 | /* |
| 2332 | * With DEBUG_VM enabled, order-0 pages are checked for expected state when |
| 2333 | * being allocated from pcp lists. With debug_pagealloc also enabled, they are |
| 2334 | * also checked when pcp lists are refilled from the free lists. |
| 2335 | */ |
| 2336 | static inline bool check_pcp_refill(struct page *page) |
Mel Gorman | 479f854 | 2016-05-19 17:14:35 -0700 | [diff] [blame] | 2337 | { |
Vlastimil Babka | 8e57f8a | 2020-01-13 16:29:20 -0800 | [diff] [blame] | 2338 | if (debug_pagealloc_enabled_static()) |
Vlastimil Babka | 4462b32 | 2019-07-11 20:55:09 -0700 | [diff] [blame] | 2339 | return check_new_page(page); |
| 2340 | else |
| 2341 | return false; |
Mel Gorman | 479f854 | 2016-05-19 17:14:35 -0700 | [diff] [blame] | 2342 | } |
| 2343 | |
Vlastimil Babka | 4462b32 | 2019-07-11 20:55:09 -0700 | [diff] [blame] | 2344 | static inline bool check_new_pcp(struct page *page) |
Mel Gorman | 479f854 | 2016-05-19 17:14:35 -0700 | [diff] [blame] | 2345 | { |
| 2346 | return check_new_page(page); |
| 2347 | } |
| 2348 | #else |
Vlastimil Babka | 4462b32 | 2019-07-11 20:55:09 -0700 | [diff] [blame] | 2349 | /* |
| 2350 | * With DEBUG_VM disabled, free order-0 pages are checked for expected state |
| 2351 | * when pcp lists are being refilled from the free lists. With debug_pagealloc |
| 2352 | * enabled, they are also checked when being allocated from the pcp lists. |
| 2353 | */ |
| 2354 | static inline bool check_pcp_refill(struct page *page) |
Mel Gorman | 479f854 | 2016-05-19 17:14:35 -0700 | [diff] [blame] | 2355 | { |
| 2356 | return check_new_page(page); |
| 2357 | } |
Vlastimil Babka | 4462b32 | 2019-07-11 20:55:09 -0700 | [diff] [blame] | 2358 | static inline bool check_new_pcp(struct page *page) |
Mel Gorman | 479f854 | 2016-05-19 17:14:35 -0700 | [diff] [blame] | 2359 | { |
Vlastimil Babka | 8e57f8a | 2020-01-13 16:29:20 -0800 | [diff] [blame] | 2360 | if (debug_pagealloc_enabled_static()) |
Vlastimil Babka | 4462b32 | 2019-07-11 20:55:09 -0700 | [diff] [blame] | 2361 | return check_new_page(page); |
| 2362 | else |
| 2363 | return false; |
Mel Gorman | 479f854 | 2016-05-19 17:14:35 -0700 | [diff] [blame] | 2364 | } |
| 2365 | #endif /* CONFIG_DEBUG_VM */ |
| 2366 | |
| 2367 | static bool check_new_pages(struct page *page, unsigned int order) |
| 2368 | { |
| 2369 | int i; |
| 2370 | for (i = 0; i < (1 << order); i++) { |
| 2371 | struct page *p = page + i; |
| 2372 | |
| 2373 | if (unlikely(check_new_page(p))) |
| 2374 | return true; |
| 2375 | } |
| 2376 | |
| 2377 | return false; |
| 2378 | } |
| 2379 | |
Joonsoo Kim | 46f24fd | 2016-07-26 15:23:58 -0700 | [diff] [blame] | 2380 | inline void post_alloc_hook(struct page *page, unsigned int order, |
| 2381 | gfp_t gfp_flags) |
| 2382 | { |
| 2383 | set_page_private(page, 0); |
| 2384 | set_page_refcounted(page); |
| 2385 | |
| 2386 | arch_alloc_page(page, order); |
Mike Rapoport | 77bc7fd | 2020-12-14 19:10:20 -0800 | [diff] [blame] | 2387 | debug_pagealloc_map_pages(page, 1 << order); |
David Hildenbrand | 862b6de | 2020-12-14 19:11:15 -0800 | [diff] [blame] | 2388 | |
Andrey Konovalov | 1bb5eab | 2021-04-29 23:00:02 -0700 | [diff] [blame] | 2389 | /* |
| 2390 | * Page unpoisoning must happen before memory initialization. |
| 2391 | * Otherwise, the poison pattern will be overwritten for __GFP_ZERO |
| 2392 | * allocations and the page unpoisoning code will complain. |
| 2393 | */ |
| 2394 | kernel_unpoison_pages(page, 1 << order); |
| 2395 | |
| 2396 | /* |
| 2397 | * As memory initialization might be integrated into KASAN, |
| 2398 | * kasan_alloc_pages and kernel_init_free_pages must be |
| 2399 | * kept together to avoid discrepancies in behavior. |
| 2400 | */ |
Peter Collingbourne | 7a3b835 | 2021-06-02 16:52:28 -0700 | [diff] [blame] | 2401 | if (kasan_has_integrated_init()) { |
| 2402 | kasan_alloc_pages(page, order, gfp_flags); |
| 2403 | } else { |
| 2404 | bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags); |
| 2405 | |
| 2406 | kasan_unpoison_pages(page, order, init); |
| 2407 | if (init) |
Peter Collingbourne | 013bb59 | 2021-06-02 16:52:29 -0700 | [diff] [blame] | 2408 | kernel_init_free_pages(page, 1 << order, |
| 2409 | gfp_flags & __GFP_ZEROTAGS); |
Peter Collingbourne | 7a3b835 | 2021-06-02 16:52:28 -0700 | [diff] [blame] | 2410 | } |
Andrey Konovalov | 1bb5eab | 2021-04-29 23:00:02 -0700 | [diff] [blame] | 2411 | |
| 2412 | set_page_owner(page, order, gfp_flags); |
Joonsoo Kim | 46f24fd | 2016-07-26 15:23:58 -0700 | [diff] [blame] | 2413 | } |
| 2414 | |
Mel Gorman | 479f854 | 2016-05-19 17:14:35 -0700 | [diff] [blame] | 2415 | static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, |
Mel Gorman | c603844 | 2016-05-19 17:13:38 -0700 | [diff] [blame] | 2416 | unsigned int alloc_flags) |
Wu Fengguang | 2a7684a | 2009-09-16 11:50:12 +0200 | [diff] [blame] | 2417 | { |
Joonsoo Kim | 46f24fd | 2016-07-26 15:23:58 -0700 | [diff] [blame] | 2418 | post_alloc_hook(page, order, gfp_flags); |
Nick Piggin | 17cf440 | 2006-03-22 00:08:41 -0800 | [diff] [blame] | 2419 | |
Nick Piggin | 17cf440 | 2006-03-22 00:08:41 -0800 | [diff] [blame] | 2420 | if (order && (gfp_flags & __GFP_COMP)) |
| 2421 | prep_compound_page(page, order); |
| 2422 | |
Vlastimil Babka | 7537919 | 2015-02-11 15:25:38 -0800 | [diff] [blame] | 2423 | /* |
Michal Hocko | 2f064f3 | 2015-08-21 14:11:51 -0700 | [diff] [blame] | 2424 | * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to |
Vlastimil Babka | 7537919 | 2015-02-11 15:25:38 -0800 | [diff] [blame] | 2425 | * allocate the page. The expectation is that the caller is taking |
| 2426 | * steps that will free more memory. The caller should avoid the page |
| 2427 | * being used for !PFMEMALLOC purposes. |
| 2428 | */ |
Michal Hocko | 2f064f3 | 2015-08-21 14:11:51 -0700 | [diff] [blame] | 2429 | if (alloc_flags & ALLOC_NO_WATERMARKS) |
| 2430 | set_page_pfmemalloc(page); |
| 2431 | else |
| 2432 | clear_page_pfmemalloc(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2433 | } |
| 2434 | |
Mel Gorman | 56fd56b | 2007-10-16 01:25:58 -0700 | [diff] [blame] | 2435 | /* |
| 2436 | * Go through the free lists for the given migratetype and remove |
| 2437 | * the smallest available page from the freelists |
| 2438 | */ |
Aaron Lu | 85ccc8f | 2017-11-15 17:36:53 -0800 | [diff] [blame] | 2439 | static __always_inline |
Mel Gorman | 728ec98 | 2009-06-16 15:32:04 -0700 | [diff] [blame] | 2440 | struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, |
Mel Gorman | 56fd56b | 2007-10-16 01:25:58 -0700 | [diff] [blame] | 2441 | int migratetype) |
| 2442 | { |
| 2443 | unsigned int current_order; |
Pintu Kumar | b8af294 | 2013-09-11 14:20:34 -0700 | [diff] [blame] | 2444 | struct free_area *area; |
Mel Gorman | 56fd56b | 2007-10-16 01:25:58 -0700 | [diff] [blame] | 2445 | struct page *page; |
| 2446 | |
| 2447 | /* Find a page of the appropriate size in the preferred list */ |
| 2448 | for (current_order = order; current_order < MAX_ORDER; ++current_order) { |
| 2449 | area = &(zone->free_area[current_order]); |
Dan Williams | b03641a | 2019-05-14 15:41:32 -0700 | [diff] [blame] | 2450 | page = get_page_from_free_area(area, migratetype); |
Geliang Tang | a16601c | 2016-01-14 15:20:30 -0800 | [diff] [blame] | 2451 | if (!page) |
| 2452 | continue; |
Alexander Duyck | 6ab0136 | 2020-04-06 20:04:49 -0700 | [diff] [blame] | 2453 | del_page_from_free_list(page, zone, current_order); |
| 2454 | expand(zone, page, order, current_order, migratetype); |
Vlastimil Babka | bb14c2c | 2015-09-08 15:01:25 -0700 | [diff] [blame] | 2455 | set_pcppage_migratetype(page, migratetype); |
Mel Gorman | 56fd56b | 2007-10-16 01:25:58 -0700 | [diff] [blame] | 2456 | return page; |
| 2457 | } |
| 2458 | |
| 2459 | return NULL; |
| 2460 | } |
| 2461 | |
| 2462 | |
Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 2463 | /* |
| 2464 | * This array describes the order lists are fallen back to when |
| 2465 | * the free lists for the desirable migrate type are depleted |
| 2466 | */ |
Wei Yang | da41566 | 2020-08-06 23:25:58 -0700 | [diff] [blame] | 2467 | static int fallbacks[MIGRATE_TYPES][3] = { |
Mel Gorman | 974a786 | 2015-11-06 16:28:34 -0800 | [diff] [blame] | 2468 | [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, |
Mel Gorman | 974a786 | 2015-11-06 16:28:34 -0800 | [diff] [blame] | 2469 | [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES }, |
Huang Shijie | 7ead334 | 2018-12-28 00:34:46 -0800 | [diff] [blame] | 2470 | [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, |
Joonsoo Kim | dc67647 | 2015-04-14 15:45:15 -0700 | [diff] [blame] | 2471 | #ifdef CONFIG_CMA |
Mel Gorman | 974a786 | 2015-11-06 16:28:34 -0800 | [diff] [blame] | 2472 | [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */ |
Michal Nazarewicz | 47118af | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 2473 | #endif |
Minchan Kim | 194159f | 2013-02-22 16:33:58 -0800 | [diff] [blame] | 2474 | #ifdef CONFIG_MEMORY_ISOLATION |
Mel Gorman | 974a786 | 2015-11-06 16:28:34 -0800 | [diff] [blame] | 2475 | [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */ |
Minchan Kim | 194159f | 2013-02-22 16:33:58 -0800 | [diff] [blame] | 2476 | #endif |
Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 2477 | }; |
| 2478 | |
Joonsoo Kim | dc67647 | 2015-04-14 15:45:15 -0700 | [diff] [blame] | 2479 | #ifdef CONFIG_CMA |
Aaron Lu | 85ccc8f | 2017-11-15 17:36:53 -0800 | [diff] [blame] | 2480 | static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, |
Joonsoo Kim | dc67647 | 2015-04-14 15:45:15 -0700 | [diff] [blame] | 2481 | unsigned int order) |
| 2482 | { |
| 2483 | return __rmqueue_smallest(zone, order, MIGRATE_CMA); |
| 2484 | } |
| 2485 | #else |
| 2486 | static inline struct page *__rmqueue_cma_fallback(struct zone *zone, |
| 2487 | unsigned int order) { return NULL; } |
| 2488 | #endif |
| 2489 | |
Mel Gorman | c361be5 | 2007-10-16 01:25:51 -0700 | [diff] [blame] | 2490 | /* |
David Hildenbrand | 293ffa5 | 2020-10-15 20:09:30 -0700 | [diff] [blame] | 2491 | * Move the free pages in a range to the freelist tail of the requested type. |
Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 2492 | * Note that start_page and end_pages are not aligned on a pageblock |
Mel Gorman | c361be5 | 2007-10-16 01:25:51 -0700 | [diff] [blame] | 2493 | * boundary. If alignment is required, use move_freepages_block() |
| 2494 | */ |
Vlastimil Babka | 02aa0cd | 2017-05-08 15:54:40 -0700 | [diff] [blame] | 2495 | static int move_freepages(struct zone *zone, |
Kefeng Wang | 39ddb99 | 2021-04-29 23:01:36 -0700 | [diff] [blame] | 2496 | unsigned long start_pfn, unsigned long end_pfn, |
Vlastimil Babka | 02aa0cd | 2017-05-08 15:54:40 -0700 | [diff] [blame] | 2497 | int migratetype, int *num_movable) |
Mel Gorman | c361be5 | 2007-10-16 01:25:51 -0700 | [diff] [blame] | 2498 | { |
| 2499 | struct page *page; |
Kefeng Wang | 39ddb99 | 2021-04-29 23:01:36 -0700 | [diff] [blame] | 2500 | unsigned long pfn; |
Kirill A. Shutemov | d00181b | 2015-11-06 16:29:57 -0800 | [diff] [blame] | 2501 | unsigned int order; |
Mel Gorman | d100313 | 2007-10-16 01:26:00 -0700 | [diff] [blame] | 2502 | int pages_moved = 0; |
Mel Gorman | c361be5 | 2007-10-16 01:25:51 -0700 | [diff] [blame] | 2503 | |
Kefeng Wang | 39ddb99 | 2021-04-29 23:01:36 -0700 | [diff] [blame] | 2504 | for (pfn = start_pfn; pfn <= end_pfn;) { |
Kefeng Wang | 39ddb99 | 2021-04-29 23:01:36 -0700 | [diff] [blame] | 2505 | page = pfn_to_page(pfn); |
Mel Gorman | c361be5 | 2007-10-16 01:25:51 -0700 | [diff] [blame] | 2506 | if (!PageBuddy(page)) { |
Vlastimil Babka | 02aa0cd | 2017-05-08 15:54:40 -0700 | [diff] [blame] | 2507 | /* |
| 2508 | * We assume that pages that could be isolated for |
| 2509 | * migration are movable. But we don't actually try |
| 2510 | * isolating, as that would be expensive. |
| 2511 | */ |
| 2512 | if (num_movable && |
| 2513 | (PageLRU(page) || __PageMovable(page))) |
| 2514 | (*num_movable)++; |
Kefeng Wang | 39ddb99 | 2021-04-29 23:01:36 -0700 | [diff] [blame] | 2515 | pfn++; |
Mel Gorman | c361be5 | 2007-10-16 01:25:51 -0700 | [diff] [blame] | 2516 | continue; |
| 2517 | } |
| 2518 | |
David Rientjes | cd96103 | 2019-08-24 17:54:40 -0700 | [diff] [blame] | 2519 | /* Make sure we are not inadvertently changing nodes */ |
| 2520 | VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); |
| 2521 | VM_BUG_ON_PAGE(page_zone(page) != zone, page); |
| 2522 | |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 2523 | order = buddy_order(page); |
Alexander Duyck | 6ab0136 | 2020-04-06 20:04:49 -0700 | [diff] [blame] | 2524 | move_to_free_list(page, zone, order, migratetype); |
Kefeng Wang | 39ddb99 | 2021-04-29 23:01:36 -0700 | [diff] [blame] | 2525 | pfn += 1 << order; |
Mel Gorman | d100313 | 2007-10-16 01:26:00 -0700 | [diff] [blame] | 2526 | pages_moved += 1 << order; |
Mel Gorman | c361be5 | 2007-10-16 01:25:51 -0700 | [diff] [blame] | 2527 | } |
| 2528 | |
Mel Gorman | d100313 | 2007-10-16 01:26:00 -0700 | [diff] [blame] | 2529 | return pages_moved; |
Mel Gorman | c361be5 | 2007-10-16 01:25:51 -0700 | [diff] [blame] | 2530 | } |
| 2531 | |
Minchan Kim | ee6f509 | 2012-07-31 16:43:50 -0700 | [diff] [blame] | 2532 | int move_freepages_block(struct zone *zone, struct page *page, |
Vlastimil Babka | 02aa0cd | 2017-05-08 15:54:40 -0700 | [diff] [blame] | 2533 | int migratetype, int *num_movable) |
Mel Gorman | c361be5 | 2007-10-16 01:25:51 -0700 | [diff] [blame] | 2534 | { |
Kefeng Wang | 39ddb99 | 2021-04-29 23:01:36 -0700 | [diff] [blame] | 2535 | unsigned long start_pfn, end_pfn, pfn; |
Mel Gorman | c361be5 | 2007-10-16 01:25:51 -0700 | [diff] [blame] | 2536 | |
David Rientjes | 4a22212 | 2018-10-26 15:09:24 -0700 | [diff] [blame] | 2537 | if (num_movable) |
| 2538 | *num_movable = 0; |
| 2539 | |
Kefeng Wang | 39ddb99 | 2021-04-29 23:01:36 -0700 | [diff] [blame] | 2540 | pfn = page_to_pfn(page); |
| 2541 | start_pfn = pfn & ~(pageblock_nr_pages - 1); |
Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 2542 | end_pfn = start_pfn + pageblock_nr_pages - 1; |
Mel Gorman | c361be5 | 2007-10-16 01:25:51 -0700 | [diff] [blame] | 2543 | |
| 2544 | /* Do not cross zone boundaries */ |
Cody P Schafer | 108bcc9 | 2013-02-22 16:35:23 -0800 | [diff] [blame] | 2545 | if (!zone_spans_pfn(zone, start_pfn)) |
Kefeng Wang | 39ddb99 | 2021-04-29 23:01:36 -0700 | [diff] [blame] | 2546 | start_pfn = pfn; |
Cody P Schafer | 108bcc9 | 2013-02-22 16:35:23 -0800 | [diff] [blame] | 2547 | if (!zone_spans_pfn(zone, end_pfn)) |
Mel Gorman | c361be5 | 2007-10-16 01:25:51 -0700 | [diff] [blame] | 2548 | return 0; |
| 2549 | |
Kefeng Wang | 39ddb99 | 2021-04-29 23:01:36 -0700 | [diff] [blame] | 2550 | return move_freepages(zone, start_pfn, end_pfn, migratetype, |
Vlastimil Babka | 02aa0cd | 2017-05-08 15:54:40 -0700 | [diff] [blame] | 2551 | num_movable); |
Mel Gorman | c361be5 | 2007-10-16 01:25:51 -0700 | [diff] [blame] | 2552 | } |
| 2553 | |
Mel Gorman | 2f66a68 | 2009-09-21 17:02:31 -0700 | [diff] [blame] | 2554 | static void change_pageblock_range(struct page *pageblock_page, |
| 2555 | int start_order, int migratetype) |
| 2556 | { |
| 2557 | int nr_pageblocks = 1 << (start_order - pageblock_order); |
| 2558 | |
| 2559 | while (nr_pageblocks--) { |
| 2560 | set_pageblock_migratetype(pageblock_page, migratetype); |
| 2561 | pageblock_page += pageblock_nr_pages; |
| 2562 | } |
| 2563 | } |
| 2564 | |
Srivatsa S. Bhat | fef903e | 2013-09-11 14:20:35 -0700 | [diff] [blame] | 2565 | /* |
Vlastimil Babka | 9c0415e | 2015-02-11 15:28:21 -0800 | [diff] [blame] | 2566 | * When we are falling back to another migratetype during allocation, try to |
| 2567 | * steal extra free pages from the same pageblocks to satisfy further |
| 2568 | * allocations, instead of polluting multiple pageblocks. |
| 2569 | * |
| 2570 | * If we are stealing a relatively large buddy page, it is likely there will |
| 2571 | * be more free pages in the pageblock, so try to steal them all. For |
| 2572 | * reclaimable and unmovable allocations, we steal regardless of page size, |
| 2573 | * as fragmentation caused by those allocations polluting movable pageblocks |
| 2574 | * is worse than movable allocations stealing from unmovable and reclaimable |
| 2575 | * pageblocks. |
Srivatsa S. Bhat | fef903e | 2013-09-11 14:20:35 -0700 | [diff] [blame] | 2576 | */ |
Joonsoo Kim | 4eb7dce | 2015-04-14 15:45:18 -0700 | [diff] [blame] | 2577 | static bool can_steal_fallback(unsigned int order, int start_mt) |
| 2578 | { |
| 2579 | /* |
| 2580 | * Leaving this order check is intended, although there is |
| 2581 | * relaxed order check in next check. The reason is that |
| 2582 | * we can actually steal whole pageblock if this condition met, |
| 2583 | * but, below check doesn't guarantee it and that is just heuristic |
| 2584 | * so could be changed anytime. |
| 2585 | */ |
| 2586 | if (order >= pageblock_order) |
| 2587 | return true; |
| 2588 | |
| 2589 | if (order >= pageblock_order / 2 || |
| 2590 | start_mt == MIGRATE_RECLAIMABLE || |
| 2591 | start_mt == MIGRATE_UNMOVABLE || |
| 2592 | page_group_by_mobility_disabled) |
| 2593 | return true; |
| 2594 | |
| 2595 | return false; |
| 2596 | } |
| 2597 | |
Johannes Weiner | 597c892 | 2020-12-14 19:12:15 -0800 | [diff] [blame] | 2598 | static inline bool boost_watermark(struct zone *zone) |
Mel Gorman | 1c30844 | 2018-12-28 00:35:52 -0800 | [diff] [blame] | 2599 | { |
| 2600 | unsigned long max_boost; |
| 2601 | |
| 2602 | if (!watermark_boost_factor) |
Johannes Weiner | 597c892 | 2020-12-14 19:12:15 -0800 | [diff] [blame] | 2603 | return false; |
Henry Willard | 14f6914 | 2020-05-07 18:36:27 -0700 | [diff] [blame] | 2604 | /* |
| 2605 | * Don't bother in zones that are unlikely to produce results. |
| 2606 | * On small machines, including kdump capture kernels running |
| 2607 | * in a small area, boosting the watermark can cause an out of |
| 2608 | * memory situation immediately. |
| 2609 | */ |
| 2610 | if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) |
Johannes Weiner | 597c892 | 2020-12-14 19:12:15 -0800 | [diff] [blame] | 2611 | return false; |
Mel Gorman | 1c30844 | 2018-12-28 00:35:52 -0800 | [diff] [blame] | 2612 | |
| 2613 | max_boost = mult_frac(zone->_watermark[WMARK_HIGH], |
| 2614 | watermark_boost_factor, 10000); |
Mel Gorman | 94b3334 | 2019-02-20 22:19:49 -0800 | [diff] [blame] | 2615 | |
| 2616 | /* |
| 2617 | * high watermark may be uninitialised if fragmentation occurs |
| 2618 | * very early in boot so do not boost. We do not fall |
| 2619 | * through and boost by pageblock_nr_pages as failing |
| 2620 | * allocations that early means that reclaim is not going |
| 2621 | * to help and it may even be impossible to reclaim the |
| 2622 | * boosted watermark resulting in a hang. |
| 2623 | */ |
| 2624 | if (!max_boost) |
Johannes Weiner | 597c892 | 2020-12-14 19:12:15 -0800 | [diff] [blame] | 2625 | return false; |
Mel Gorman | 94b3334 | 2019-02-20 22:19:49 -0800 | [diff] [blame] | 2626 | |
Mel Gorman | 1c30844 | 2018-12-28 00:35:52 -0800 | [diff] [blame] | 2627 | max_boost = max(pageblock_nr_pages, max_boost); |
| 2628 | |
| 2629 | zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, |
| 2630 | max_boost); |
Johannes Weiner | 597c892 | 2020-12-14 19:12:15 -0800 | [diff] [blame] | 2631 | |
| 2632 | return true; |
Mel Gorman | 1c30844 | 2018-12-28 00:35:52 -0800 | [diff] [blame] | 2633 | } |
| 2634 | |
Joonsoo Kim | 4eb7dce | 2015-04-14 15:45:18 -0700 | [diff] [blame] | 2635 | /* |
| 2636 | * This function implements actual steal behaviour. If order is large enough, |
| 2637 | * we can steal whole pageblock. If not, we first move freepages in this |
Vlastimil Babka | 02aa0cd | 2017-05-08 15:54:40 -0700 | [diff] [blame] | 2638 | * pageblock to our migratetype and determine how many already-allocated pages |
| 2639 | * are there in the pageblock with a compatible migratetype. If at least half |
| 2640 | * of pages are free or compatible, we can change migratetype of the pageblock |
| 2641 | * itself, so pages freed in the future will be put on the correct free list. |
Joonsoo Kim | 4eb7dce | 2015-04-14 15:45:18 -0700 | [diff] [blame] | 2642 | */ |
| 2643 | static void steal_suitable_fallback(struct zone *zone, struct page *page, |
Mel Gorman | 1c30844 | 2018-12-28 00:35:52 -0800 | [diff] [blame] | 2644 | unsigned int alloc_flags, int start_type, bool whole_block) |
Srivatsa S. Bhat | fef903e | 2013-09-11 14:20:35 -0700 | [diff] [blame] | 2645 | { |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 2646 | unsigned int current_order = buddy_order(page); |
Vlastimil Babka | 02aa0cd | 2017-05-08 15:54:40 -0700 | [diff] [blame] | 2647 | int free_pages, movable_pages, alike_pages; |
| 2648 | int old_block_type; |
| 2649 | |
| 2650 | old_block_type = get_pageblock_migratetype(page); |
Srivatsa S. Bhat | fef903e | 2013-09-11 14:20:35 -0700 | [diff] [blame] | 2651 | |
Vlastimil Babka | 3bc48f9 | 2017-05-08 15:54:37 -0700 | [diff] [blame] | 2652 | /* |
| 2653 | * This can happen due to races and we want to prevent broken |
| 2654 | * highatomic accounting. |
| 2655 | */ |
Vlastimil Babka | 02aa0cd | 2017-05-08 15:54:40 -0700 | [diff] [blame] | 2656 | if (is_migrate_highatomic(old_block_type)) |
Vlastimil Babka | 3bc48f9 | 2017-05-08 15:54:37 -0700 | [diff] [blame] | 2657 | goto single_page; |
| 2658 | |
Srivatsa S. Bhat | fef903e | 2013-09-11 14:20:35 -0700 | [diff] [blame] | 2659 | /* Take ownership for orders >= pageblock_order */ |
| 2660 | if (current_order >= pageblock_order) { |
| 2661 | change_pageblock_range(page, current_order, start_type); |
Vlastimil Babka | 3bc48f9 | 2017-05-08 15:54:37 -0700 | [diff] [blame] | 2662 | goto single_page; |
Srivatsa S. Bhat | fef903e | 2013-09-11 14:20:35 -0700 | [diff] [blame] | 2663 | } |
| 2664 | |
Mel Gorman | 1c30844 | 2018-12-28 00:35:52 -0800 | [diff] [blame] | 2665 | /* |
| 2666 | * Boost watermarks to increase reclaim pressure to reduce the |
| 2667 | * likelihood of future fallbacks. Wake kswapd now as the node |
| 2668 | * may be balanced overall and kswapd will not wake naturally. |
| 2669 | */ |
Johannes Weiner | 597c892 | 2020-12-14 19:12:15 -0800 | [diff] [blame] | 2670 | if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) |
Mel Gorman | 73444bc | 2019-01-08 15:23:39 -0800 | [diff] [blame] | 2671 | set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); |
Mel Gorman | 1c30844 | 2018-12-28 00:35:52 -0800 | [diff] [blame] | 2672 | |
Vlastimil Babka | 3bc48f9 | 2017-05-08 15:54:37 -0700 | [diff] [blame] | 2673 | /* We are not allowed to try stealing from the whole block */ |
| 2674 | if (!whole_block) |
| 2675 | goto single_page; |
| 2676 | |
Vlastimil Babka | 02aa0cd | 2017-05-08 15:54:40 -0700 | [diff] [blame] | 2677 | free_pages = move_freepages_block(zone, page, start_type, |
| 2678 | &movable_pages); |
| 2679 | /* |
| 2680 | * Determine how many pages are compatible with our allocation. |
| 2681 | * For movable allocation, it's the number of movable pages which |
| 2682 | * we just obtained. For other types it's a bit more tricky. |
| 2683 | */ |
| 2684 | if (start_type == MIGRATE_MOVABLE) { |
| 2685 | alike_pages = movable_pages; |
| 2686 | } else { |
| 2687 | /* |
| 2688 | * If we are falling back a RECLAIMABLE or UNMOVABLE allocation |
| 2689 | * to MOVABLE pageblock, consider all non-movable pages as |
| 2690 | * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or |
| 2691 | * vice versa, be conservative since we can't distinguish the |
| 2692 | * exact migratetype of non-movable pages. |
| 2693 | */ |
| 2694 | if (old_block_type == MIGRATE_MOVABLE) |
| 2695 | alike_pages = pageblock_nr_pages |
| 2696 | - (free_pages + movable_pages); |
| 2697 | else |
| 2698 | alike_pages = 0; |
| 2699 | } |
| 2700 | |
Vlastimil Babka | 3bc48f9 | 2017-05-08 15:54:37 -0700 | [diff] [blame] | 2701 | /* moving whole block can fail due to zone boundary conditions */ |
Vlastimil Babka | 02aa0cd | 2017-05-08 15:54:40 -0700 | [diff] [blame] | 2702 | if (!free_pages) |
Vlastimil Babka | 3bc48f9 | 2017-05-08 15:54:37 -0700 | [diff] [blame] | 2703 | goto single_page; |
Srivatsa S. Bhat | fef903e | 2013-09-11 14:20:35 -0700 | [diff] [blame] | 2704 | |
Vlastimil Babka | 02aa0cd | 2017-05-08 15:54:40 -0700 | [diff] [blame] | 2705 | /* |
| 2706 | * If a sufficient number of pages in the block are either free or of |
| 2707 | * comparable migratability as our allocation, claim the whole block. |
| 2708 | */ |
| 2709 | if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || |
Joonsoo Kim | 4eb7dce | 2015-04-14 15:45:18 -0700 | [diff] [blame] | 2710 | page_group_by_mobility_disabled) |
| 2711 | set_pageblock_migratetype(page, start_type); |
Vlastimil Babka | 3bc48f9 | 2017-05-08 15:54:37 -0700 | [diff] [blame] | 2712 | |
| 2713 | return; |
| 2714 | |
| 2715 | single_page: |
Alexander Duyck | 6ab0136 | 2020-04-06 20:04:49 -0700 | [diff] [blame] | 2716 | move_to_free_list(page, zone, current_order, start_type); |
Joonsoo Kim | 4eb7dce | 2015-04-14 15:45:18 -0700 | [diff] [blame] | 2717 | } |
Srivatsa S. Bhat | fef903e | 2013-09-11 14:20:35 -0700 | [diff] [blame] | 2718 | |
Joonsoo Kim | 2149cda | 2015-04-14 15:45:21 -0700 | [diff] [blame] | 2719 | /* |
| 2720 | * Check whether there is a suitable fallback freepage with requested order. |
| 2721 | * If only_stealable is true, this function returns fallback_mt only if |
| 2722 | * we can steal other freepages all together. This would help to reduce |
| 2723 | * fragmentation due to mixed migratetype pages in one pageblock. |
| 2724 | */ |
| 2725 | int find_suitable_fallback(struct free_area *area, unsigned int order, |
| 2726 | int migratetype, bool only_stealable, bool *can_steal) |
Joonsoo Kim | 4eb7dce | 2015-04-14 15:45:18 -0700 | [diff] [blame] | 2727 | { |
| 2728 | int i; |
| 2729 | int fallback_mt; |
| 2730 | |
| 2731 | if (area->nr_free == 0) |
| 2732 | return -1; |
| 2733 | |
| 2734 | *can_steal = false; |
| 2735 | for (i = 0;; i++) { |
| 2736 | fallback_mt = fallbacks[migratetype][i]; |
Mel Gorman | 974a786 | 2015-11-06 16:28:34 -0800 | [diff] [blame] | 2737 | if (fallback_mt == MIGRATE_TYPES) |
Joonsoo Kim | 4eb7dce | 2015-04-14 15:45:18 -0700 | [diff] [blame] | 2738 | break; |
| 2739 | |
Dan Williams | b03641a | 2019-05-14 15:41:32 -0700 | [diff] [blame] | 2740 | if (free_area_empty(area, fallback_mt)) |
Joonsoo Kim | 4eb7dce | 2015-04-14 15:45:18 -0700 | [diff] [blame] | 2741 | continue; |
| 2742 | |
| 2743 | if (can_steal_fallback(order, migratetype)) |
| 2744 | *can_steal = true; |
| 2745 | |
Joonsoo Kim | 2149cda | 2015-04-14 15:45:21 -0700 | [diff] [blame] | 2746 | if (!only_stealable) |
| 2747 | return fallback_mt; |
| 2748 | |
| 2749 | if (*can_steal) |
| 2750 | return fallback_mt; |
Srivatsa S. Bhat | fef903e | 2013-09-11 14:20:35 -0700 | [diff] [blame] | 2751 | } |
Joonsoo Kim | 4eb7dce | 2015-04-14 15:45:18 -0700 | [diff] [blame] | 2752 | |
| 2753 | return -1; |
Srivatsa S. Bhat | fef903e | 2013-09-11 14:20:35 -0700 | [diff] [blame] | 2754 | } |
| 2755 | |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 2756 | /* |
| 2757 | * Reserve a pageblock for exclusive use of high-order atomic allocations if |
| 2758 | * there are no empty page blocks that contain a page with a suitable order |
| 2759 | */ |
| 2760 | static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, |
| 2761 | unsigned int alloc_order) |
| 2762 | { |
| 2763 | int mt; |
| 2764 | unsigned long max_managed, flags; |
| 2765 | |
| 2766 | /* |
| 2767 | * Limit the number reserved to 1 pageblock or roughly 1% of a zone. |
| 2768 | * Check is race-prone but harmless. |
| 2769 | */ |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 2770 | max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 2771 | if (zone->nr_reserved_highatomic >= max_managed) |
| 2772 | return; |
| 2773 | |
| 2774 | spin_lock_irqsave(&zone->lock, flags); |
| 2775 | |
| 2776 | /* Recheck the nr_reserved_highatomic limit under the lock */ |
| 2777 | if (zone->nr_reserved_highatomic >= max_managed) |
| 2778 | goto out_unlock; |
| 2779 | |
| 2780 | /* Yoink! */ |
| 2781 | mt = get_pageblock_migratetype(page); |
Xishi Qiu | a6ffdc0 | 2017-05-03 14:52:52 -0700 | [diff] [blame] | 2782 | if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt) |
| 2783 | && !is_migrate_cma(mt)) { |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 2784 | zone->nr_reserved_highatomic += pageblock_nr_pages; |
| 2785 | set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); |
Vlastimil Babka | 02aa0cd | 2017-05-08 15:54:40 -0700 | [diff] [blame] | 2786 | move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 2787 | } |
| 2788 | |
| 2789 | out_unlock: |
| 2790 | spin_unlock_irqrestore(&zone->lock, flags); |
| 2791 | } |
| 2792 | |
| 2793 | /* |
| 2794 | * Used when an allocation is about to fail under memory pressure. This |
| 2795 | * potentially hurts the reliability of high-order allocations when under |
| 2796 | * intense memory pressure but failed atomic allocations should be easier |
| 2797 | * to recover from than an OOM. |
Minchan Kim | 29fac03 | 2016-12-12 16:42:14 -0800 | [diff] [blame] | 2798 | * |
| 2799 | * If @force is true, try to unreserve a pageblock even though highatomic |
| 2800 | * pageblock is exhausted. |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 2801 | */ |
Minchan Kim | 29fac03 | 2016-12-12 16:42:14 -0800 | [diff] [blame] | 2802 | static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, |
| 2803 | bool force) |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 2804 | { |
| 2805 | struct zonelist *zonelist = ac->zonelist; |
| 2806 | unsigned long flags; |
| 2807 | struct zoneref *z; |
| 2808 | struct zone *zone; |
| 2809 | struct page *page; |
| 2810 | int order; |
Minchan Kim | 04c8716 | 2016-12-12 16:42:11 -0800 | [diff] [blame] | 2811 | bool ret; |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 2812 | |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 2813 | for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 2814 | ac->nodemask) { |
Minchan Kim | 29fac03 | 2016-12-12 16:42:14 -0800 | [diff] [blame] | 2815 | /* |
| 2816 | * Preserve at least one pageblock unless memory pressure |
| 2817 | * is really high. |
| 2818 | */ |
| 2819 | if (!force && zone->nr_reserved_highatomic <= |
| 2820 | pageblock_nr_pages) |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 2821 | continue; |
| 2822 | |
| 2823 | spin_lock_irqsave(&zone->lock, flags); |
| 2824 | for (order = 0; order < MAX_ORDER; order++) { |
| 2825 | struct free_area *area = &(zone->free_area[order]); |
| 2826 | |
Dan Williams | b03641a | 2019-05-14 15:41:32 -0700 | [diff] [blame] | 2827 | page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); |
Geliang Tang | a16601c | 2016-01-14 15:20:30 -0800 | [diff] [blame] | 2828 | if (!page) |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 2829 | continue; |
| 2830 | |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 2831 | /* |
Minchan Kim | 4855e4a | 2016-12-12 16:42:08 -0800 | [diff] [blame] | 2832 | * In page freeing path, migratetype change is racy so |
| 2833 | * we can counter several free pages in a pageblock |
Ingo Molnar | f0953a1 | 2021-05-06 18:06:47 -0700 | [diff] [blame] | 2834 | * in this loop although we changed the pageblock type |
Minchan Kim | 4855e4a | 2016-12-12 16:42:08 -0800 | [diff] [blame] | 2835 | * from highatomic to ac->migratetype. So we should |
| 2836 | * adjust the count once. |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 2837 | */ |
Xishi Qiu | a6ffdc0 | 2017-05-03 14:52:52 -0700 | [diff] [blame] | 2838 | if (is_migrate_highatomic_page(page)) { |
Minchan Kim | 4855e4a | 2016-12-12 16:42:08 -0800 | [diff] [blame] | 2839 | /* |
| 2840 | * It should never happen but changes to |
| 2841 | * locking could inadvertently allow a per-cpu |
| 2842 | * drain to add pages to MIGRATE_HIGHATOMIC |
| 2843 | * while unreserving so be safe and watch for |
| 2844 | * underflows. |
| 2845 | */ |
| 2846 | zone->nr_reserved_highatomic -= min( |
| 2847 | pageblock_nr_pages, |
| 2848 | zone->nr_reserved_highatomic); |
| 2849 | } |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 2850 | |
| 2851 | /* |
| 2852 | * Convert to ac->migratetype and avoid the normal |
| 2853 | * pageblock stealing heuristics. Minimally, the caller |
| 2854 | * is doing the work and needs the pages. More |
| 2855 | * importantly, if the block was always converted to |
| 2856 | * MIGRATE_UNMOVABLE or another type then the number |
| 2857 | * of pageblocks that cannot be completely freed |
| 2858 | * may increase. |
| 2859 | */ |
| 2860 | set_pageblock_migratetype(page, ac->migratetype); |
Vlastimil Babka | 02aa0cd | 2017-05-08 15:54:40 -0700 | [diff] [blame] | 2861 | ret = move_freepages_block(zone, page, ac->migratetype, |
| 2862 | NULL); |
Minchan Kim | 29fac03 | 2016-12-12 16:42:14 -0800 | [diff] [blame] | 2863 | if (ret) { |
| 2864 | spin_unlock_irqrestore(&zone->lock, flags); |
| 2865 | return ret; |
| 2866 | } |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 2867 | } |
| 2868 | spin_unlock_irqrestore(&zone->lock, flags); |
| 2869 | } |
Minchan Kim | 04c8716 | 2016-12-12 16:42:11 -0800 | [diff] [blame] | 2870 | |
| 2871 | return false; |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 2872 | } |
| 2873 | |
Vlastimil Babka | 3bc48f9 | 2017-05-08 15:54:37 -0700 | [diff] [blame] | 2874 | /* |
| 2875 | * Try finding a free buddy page on the fallback list and put it on the free |
| 2876 | * list of requested migratetype, possibly along with other pages from the same |
| 2877 | * block, depending on fragmentation avoidance heuristics. Returns true if |
| 2878 | * fallback was found so that __rmqueue_smallest() can grab it. |
Rasmus Villemoes | b002529 | 2017-07-10 15:49:26 -0700 | [diff] [blame] | 2879 | * |
| 2880 | * The use of signed ints for order and current_order is a deliberate |
| 2881 | * deviation from the rest of this file, to make the for loop |
| 2882 | * condition simpler. |
Vlastimil Babka | 3bc48f9 | 2017-05-08 15:54:37 -0700 | [diff] [blame] | 2883 | */ |
Aaron Lu | 85ccc8f | 2017-11-15 17:36:53 -0800 | [diff] [blame] | 2884 | static __always_inline bool |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 2885 | __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, |
| 2886 | unsigned int alloc_flags) |
Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 2887 | { |
Pintu Kumar | b8af294 | 2013-09-11 14:20:34 -0700 | [diff] [blame] | 2888 | struct free_area *area; |
Rasmus Villemoes | b002529 | 2017-07-10 15:49:26 -0700 | [diff] [blame] | 2889 | int current_order; |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 2890 | int min_order = order; |
Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 2891 | struct page *page; |
Joonsoo Kim | 4eb7dce | 2015-04-14 15:45:18 -0700 | [diff] [blame] | 2892 | int fallback_mt; |
| 2893 | bool can_steal; |
Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 2894 | |
Vlastimil Babka | 7a8f58f | 2017-07-10 15:47:14 -0700 | [diff] [blame] | 2895 | /* |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 2896 | * Do not steal pages from freelists belonging to other pageblocks |
| 2897 | * i.e. orders < pageblock_order. If there are no local zones free, |
| 2898 | * the zonelists will be reiterated without ALLOC_NOFRAGMENT. |
| 2899 | */ |
| 2900 | if (alloc_flags & ALLOC_NOFRAGMENT) |
| 2901 | min_order = pageblock_order; |
| 2902 | |
| 2903 | /* |
Vlastimil Babka | 7a8f58f | 2017-07-10 15:47:14 -0700 | [diff] [blame] | 2904 | * Find the largest available free page in the other list. This roughly |
| 2905 | * approximates finding the pageblock with the most free pages, which |
| 2906 | * would be too costly to do exactly. |
| 2907 | */ |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 2908 | for (current_order = MAX_ORDER - 1; current_order >= min_order; |
Mel Gorman | 7aeb09f | 2014-06-04 16:10:21 -0700 | [diff] [blame] | 2909 | --current_order) { |
Joonsoo Kim | 4eb7dce | 2015-04-14 15:45:18 -0700 | [diff] [blame] | 2910 | area = &(zone->free_area[current_order]); |
| 2911 | fallback_mt = find_suitable_fallback(area, current_order, |
Joonsoo Kim | 2149cda | 2015-04-14 15:45:21 -0700 | [diff] [blame] | 2912 | start_migratetype, false, &can_steal); |
Joonsoo Kim | 4eb7dce | 2015-04-14 15:45:18 -0700 | [diff] [blame] | 2913 | if (fallback_mt == -1) |
| 2914 | continue; |
Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 2915 | |
Vlastimil Babka | 7a8f58f | 2017-07-10 15:47:14 -0700 | [diff] [blame] | 2916 | /* |
| 2917 | * We cannot steal all free pages from the pageblock and the |
| 2918 | * requested migratetype is movable. In that case it's better to |
| 2919 | * steal and split the smallest available page instead of the |
| 2920 | * largest available page, because even if the next movable |
| 2921 | * allocation falls back into a different pageblock than this |
| 2922 | * one, it won't cause permanent fragmentation. |
| 2923 | */ |
| 2924 | if (!can_steal && start_migratetype == MIGRATE_MOVABLE |
| 2925 | && current_order > order) |
| 2926 | goto find_smallest; |
Mel Gorman | e010487 | 2007-10-16 01:25:53 -0700 | [diff] [blame] | 2927 | |
Vlastimil Babka | 7a8f58f | 2017-07-10 15:47:14 -0700 | [diff] [blame] | 2928 | goto do_steal; |
Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 2929 | } |
| 2930 | |
Vlastimil Babka | 3bc48f9 | 2017-05-08 15:54:37 -0700 | [diff] [blame] | 2931 | return false; |
Vlastimil Babka | 7a8f58f | 2017-07-10 15:47:14 -0700 | [diff] [blame] | 2932 | |
| 2933 | find_smallest: |
| 2934 | for (current_order = order; current_order < MAX_ORDER; |
| 2935 | current_order++) { |
| 2936 | area = &(zone->free_area[current_order]); |
| 2937 | fallback_mt = find_suitable_fallback(area, current_order, |
| 2938 | start_migratetype, false, &can_steal); |
| 2939 | if (fallback_mt != -1) |
| 2940 | break; |
| 2941 | } |
| 2942 | |
| 2943 | /* |
| 2944 | * This should not happen - we already found a suitable fallback |
| 2945 | * when looking for the largest page. |
| 2946 | */ |
| 2947 | VM_BUG_ON(current_order == MAX_ORDER); |
| 2948 | |
| 2949 | do_steal: |
Dan Williams | b03641a | 2019-05-14 15:41:32 -0700 | [diff] [blame] | 2950 | page = get_page_from_free_area(area, fallback_mt); |
Vlastimil Babka | 7a8f58f | 2017-07-10 15:47:14 -0700 | [diff] [blame] | 2951 | |
Mel Gorman | 1c30844 | 2018-12-28 00:35:52 -0800 | [diff] [blame] | 2952 | steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, |
| 2953 | can_steal); |
Vlastimil Babka | 7a8f58f | 2017-07-10 15:47:14 -0700 | [diff] [blame] | 2954 | |
| 2955 | trace_mm_page_alloc_extfrag(page, order, current_order, |
| 2956 | start_migratetype, fallback_mt); |
| 2957 | |
| 2958 | return true; |
| 2959 | |
Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 2960 | } |
| 2961 | |
Mel Gorman | 56fd56b | 2007-10-16 01:25:58 -0700 | [diff] [blame] | 2962 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2963 | * Do the hard work of removing an element from the buddy allocator. |
| 2964 | * Call me with the zone->lock already held. |
| 2965 | */ |
Aaron Lu | 85ccc8f | 2017-11-15 17:36:53 -0800 | [diff] [blame] | 2966 | static __always_inline struct page * |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 2967 | __rmqueue(struct zone *zone, unsigned int order, int migratetype, |
| 2968 | unsigned int alloc_flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2969 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2970 | struct page *page; |
| 2971 | |
Hailong liu | ce8f86e | 2021-01-12 15:49:08 -0800 | [diff] [blame] | 2972 | if (IS_ENABLED(CONFIG_CMA)) { |
| 2973 | /* |
| 2974 | * Balance movable allocations between regular and CMA areas by |
| 2975 | * allocating from CMA when over half of the zone's free memory |
| 2976 | * is in the CMA area. |
| 2977 | */ |
| 2978 | if (alloc_flags & ALLOC_CMA && |
| 2979 | zone_page_state(zone, NR_FREE_CMA_PAGES) > |
| 2980 | zone_page_state(zone, NR_FREE_PAGES) / 2) { |
| 2981 | page = __rmqueue_cma_fallback(zone, order); |
| 2982 | if (page) |
| 2983 | goto out; |
| 2984 | } |
Roman Gushchin | 1686766 | 2020-06-03 15:58:42 -0700 | [diff] [blame] | 2985 | } |
Vlastimil Babka | 3bc48f9 | 2017-05-08 15:54:37 -0700 | [diff] [blame] | 2986 | retry: |
Mel Gorman | 56fd56b | 2007-10-16 01:25:58 -0700 | [diff] [blame] | 2987 | page = __rmqueue_smallest(zone, order, migratetype); |
Mel Gorman | 974a786 | 2015-11-06 16:28:34 -0800 | [diff] [blame] | 2988 | if (unlikely(!page)) { |
Joonsoo Kim | 8510e69 | 2020-08-06 23:26:04 -0700 | [diff] [blame] | 2989 | if (alloc_flags & ALLOC_CMA) |
Joonsoo Kim | dc67647 | 2015-04-14 15:45:15 -0700 | [diff] [blame] | 2990 | page = __rmqueue_cma_fallback(zone, order); |
| 2991 | |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 2992 | if (!page && __rmqueue_fallback(zone, order, migratetype, |
| 2993 | alloc_flags)) |
Vlastimil Babka | 3bc48f9 | 2017-05-08 15:54:37 -0700 | [diff] [blame] | 2994 | goto retry; |
Mel Gorman | 728ec98 | 2009-06-16 15:32:04 -0700 | [diff] [blame] | 2995 | } |
Hailong liu | ce8f86e | 2021-01-12 15:49:08 -0800 | [diff] [blame] | 2996 | out: |
| 2997 | if (page) |
| 2998 | trace_mm_page_alloc_zone_locked(page, order, migratetype); |
Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 2999 | return page; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3000 | } |
| 3001 | |
Michal Nazarewicz | 5f63b72 | 2012-01-11 15:16:11 +0100 | [diff] [blame] | 3002 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3003 | * Obtain a specified number of elements from the buddy allocator, all under |
| 3004 | * a single hold of the lock, for efficiency. Add them to the supplied list. |
| 3005 | * Returns the number of new pages which were placed at *list. |
| 3006 | */ |
Michal Nazarewicz | 5f63b72 | 2012-01-11 15:16:11 +0100 | [diff] [blame] | 3007 | static int rmqueue_bulk(struct zone *zone, unsigned int order, |
Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 3008 | unsigned long count, struct list_head *list, |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 3009 | int migratetype, unsigned int alloc_flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3010 | { |
Mel Gorman | cb66bed | 2021-04-29 23:01:42 -0700 | [diff] [blame] | 3011 | int i, allocated = 0; |
Michal Nazarewicz | 5f63b72 | 2012-01-11 15:16:11 +0100 | [diff] [blame] | 3012 | |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 3013 | /* |
| 3014 | * local_lock_irq held so equivalent to spin_lock_irqsave for |
| 3015 | * both PREEMPT_RT and non-PREEMPT_RT configurations. |
| 3016 | */ |
Mel Gorman | d34b073 | 2017-04-20 14:37:43 -0700 | [diff] [blame] | 3017 | spin_lock(&zone->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3018 | for (i = 0; i < count; ++i) { |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 3019 | struct page *page = __rmqueue(zone, order, migratetype, |
| 3020 | alloc_flags); |
Nick Piggin | 085cc7d5 | 2006-01-06 00:11:01 -0800 | [diff] [blame] | 3021 | if (unlikely(page == NULL)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3022 | break; |
Mel Gorman | 81eabcb | 2007-12-17 16:20:05 -0800 | [diff] [blame] | 3023 | |
Mel Gorman | 479f854 | 2016-05-19 17:14:35 -0700 | [diff] [blame] | 3024 | if (unlikely(check_pcp_refill(page))) |
| 3025 | continue; |
| 3026 | |
Mel Gorman | 81eabcb | 2007-12-17 16:20:05 -0800 | [diff] [blame] | 3027 | /* |
Vlastimil Babka | 0fac3ba | 2017-11-15 17:38:07 -0800 | [diff] [blame] | 3028 | * Split buddy pages returned by expand() are received here in |
| 3029 | * physical page order. The page is added to the tail of |
| 3030 | * caller's list. From the callers perspective, the linked list |
| 3031 | * is ordered by page number under some conditions. This is |
| 3032 | * useful for IO devices that can forward direction from the |
| 3033 | * head, thus also in the physical page order. This is useful |
| 3034 | * for IO devices that can merge IO requests if the physical |
| 3035 | * pages are ordered properly. |
Mel Gorman | 81eabcb | 2007-12-17 16:20:05 -0800 | [diff] [blame] | 3036 | */ |
Vlastimil Babka | 0fac3ba | 2017-11-15 17:38:07 -0800 | [diff] [blame] | 3037 | list_add_tail(&page->lru, list); |
Mel Gorman | cb66bed | 2021-04-29 23:01:42 -0700 | [diff] [blame] | 3038 | allocated++; |
Vlastimil Babka | bb14c2c | 2015-09-08 15:01:25 -0700 | [diff] [blame] | 3039 | if (is_migrate_cma(get_pcppage_migratetype(page))) |
Bartlomiej Zolnierkiewicz | d1ce749 | 2012-10-08 16:32:02 -0700 | [diff] [blame] | 3040 | __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, |
| 3041 | -(1 << order)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3042 | } |
Mel Gorman | a6de734 | 2016-12-12 16:44:41 -0800 | [diff] [blame] | 3043 | |
| 3044 | /* |
| 3045 | * i pages were removed from the buddy list even if some leak due |
| 3046 | * to check_pcp_refill failing so adjust NR_FREE_PAGES based |
Mel Gorman | cb66bed | 2021-04-29 23:01:42 -0700 | [diff] [blame] | 3047 | * on i. Do not confuse with 'allocated' which is the number of |
Mel Gorman | a6de734 | 2016-12-12 16:44:41 -0800 | [diff] [blame] | 3048 | * pages added to the pcp list. |
| 3049 | */ |
Mel Gorman | f2260e6 | 2009-06-16 15:32:13 -0700 | [diff] [blame] | 3050 | __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); |
Mel Gorman | d34b073 | 2017-04-20 14:37:43 -0700 | [diff] [blame] | 3051 | spin_unlock(&zone->lock); |
Mel Gorman | cb66bed | 2021-04-29 23:01:42 -0700 | [diff] [blame] | 3052 | return allocated; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3053 | } |
| 3054 | |
Christoph Lameter | 4ae7c03 | 2005-06-21 17:14:57 -0700 | [diff] [blame] | 3055 | #ifdef CONFIG_NUMA |
Christoph Lameter | 8fce4d8 | 2006-03-09 17:33:54 -0800 | [diff] [blame] | 3056 | /* |
Christoph Lameter | 4037d45 | 2007-05-09 02:35:14 -0700 | [diff] [blame] | 3057 | * Called from the vmstat counter updater to drain pagesets of this |
| 3058 | * currently executing processor on remote nodes after they have |
| 3059 | * expired. |
| 3060 | * |
Christoph Lameter | 879336c | 2006-03-22 00:09:08 -0800 | [diff] [blame] | 3061 | * Note that this function must be called with the thread pinned to |
| 3062 | * a single processor. |
Christoph Lameter | 8fce4d8 | 2006-03-09 17:33:54 -0800 | [diff] [blame] | 3063 | */ |
Christoph Lameter | 4037d45 | 2007-05-09 02:35:14 -0700 | [diff] [blame] | 3064 | void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) |
Christoph Lameter | 4ae7c03 | 2005-06-21 17:14:57 -0700 | [diff] [blame] | 3065 | { |
Christoph Lameter | 4ae7c03 | 2005-06-21 17:14:57 -0700 | [diff] [blame] | 3066 | unsigned long flags; |
Michal Nazarewicz | 7be12fc | 2014-08-06 16:05:15 -0700 | [diff] [blame] | 3067 | int to_drain, batch; |
Christoph Lameter | 4ae7c03 | 2005-06-21 17:14:57 -0700 | [diff] [blame] | 3068 | |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 3069 | local_lock_irqsave(&pagesets.lock, flags); |
Jason Low | 4db0c3c | 2015-04-15 16:14:08 -0700 | [diff] [blame] | 3070 | batch = READ_ONCE(pcp->batch); |
Michal Nazarewicz | 7be12fc | 2014-08-06 16:05:15 -0700 | [diff] [blame] | 3071 | to_drain = min(pcp->count, batch); |
Aaron Lu | 77ba906 | 2018-04-05 16:24:06 -0700 | [diff] [blame] | 3072 | if (to_drain > 0) |
KOSAKI Motohiro | 2a13515 | 2012-07-31 16:42:53 -0700 | [diff] [blame] | 3073 | free_pcppages_bulk(zone, to_drain, pcp); |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 3074 | local_unlock_irqrestore(&pagesets.lock, flags); |
Christoph Lameter | 4ae7c03 | 2005-06-21 17:14:57 -0700 | [diff] [blame] | 3075 | } |
| 3076 | #endif |
| 3077 | |
Christoph Lameter | 9f8f217 | 2008-02-04 22:29:11 -0800 | [diff] [blame] | 3078 | /* |
Vlastimil Babka | 93481ff | 2014-12-10 15:43:01 -0800 | [diff] [blame] | 3079 | * Drain pcplists of the indicated processor and zone. |
| 3080 | * |
| 3081 | * The processor must either be the current processor and the |
| 3082 | * thread pinned to the current processor or a processor that |
| 3083 | * is not online. |
| 3084 | */ |
| 3085 | static void drain_pages_zone(unsigned int cpu, struct zone *zone) |
| 3086 | { |
| 3087 | unsigned long flags; |
Vlastimil Babka | 93481ff | 2014-12-10 15:43:01 -0800 | [diff] [blame] | 3088 | struct per_cpu_pages *pcp; |
| 3089 | |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 3090 | local_lock_irqsave(&pagesets.lock, flags); |
Vlastimil Babka | 93481ff | 2014-12-10 15:43:01 -0800 | [diff] [blame] | 3091 | |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 3092 | pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); |
Aaron Lu | 77ba906 | 2018-04-05 16:24:06 -0700 | [diff] [blame] | 3093 | if (pcp->count) |
Vlastimil Babka | 93481ff | 2014-12-10 15:43:01 -0800 | [diff] [blame] | 3094 | free_pcppages_bulk(zone, pcp->count, pcp); |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 3095 | |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 3096 | local_unlock_irqrestore(&pagesets.lock, flags); |
Vlastimil Babka | 93481ff | 2014-12-10 15:43:01 -0800 | [diff] [blame] | 3097 | } |
| 3098 | |
| 3099 | /* |
| 3100 | * Drain pcplists of all zones on the indicated processor. |
Christoph Lameter | 9f8f217 | 2008-02-04 22:29:11 -0800 | [diff] [blame] | 3101 | * |
| 3102 | * The processor must either be the current processor and the |
| 3103 | * thread pinned to the current processor or a processor that |
| 3104 | * is not online. |
| 3105 | */ |
| 3106 | static void drain_pages(unsigned int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3107 | { |
| 3108 | struct zone *zone; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3109 | |
KOSAKI Motohiro | ee99c71 | 2009-03-31 15:19:31 -0700 | [diff] [blame] | 3110 | for_each_populated_zone(zone) { |
Vlastimil Babka | 93481ff | 2014-12-10 15:43:01 -0800 | [diff] [blame] | 3111 | drain_pages_zone(cpu, zone); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3112 | } |
| 3113 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3114 | |
Christoph Lameter | 9f8f217 | 2008-02-04 22:29:11 -0800 | [diff] [blame] | 3115 | /* |
| 3116 | * Spill all of this CPU's per-cpu pages back into the buddy allocator. |
Vlastimil Babka | 93481ff | 2014-12-10 15:43:01 -0800 | [diff] [blame] | 3117 | * |
| 3118 | * The CPU has to be pinned. When zone parameter is non-NULL, spill just |
| 3119 | * the single zone's pages. |
Christoph Lameter | 9f8f217 | 2008-02-04 22:29:11 -0800 | [diff] [blame] | 3120 | */ |
Vlastimil Babka | 93481ff | 2014-12-10 15:43:01 -0800 | [diff] [blame] | 3121 | void drain_local_pages(struct zone *zone) |
Christoph Lameter | 9f8f217 | 2008-02-04 22:29:11 -0800 | [diff] [blame] | 3122 | { |
Vlastimil Babka | 93481ff | 2014-12-10 15:43:01 -0800 | [diff] [blame] | 3123 | int cpu = smp_processor_id(); |
| 3124 | |
| 3125 | if (zone) |
| 3126 | drain_pages_zone(cpu, zone); |
| 3127 | else |
| 3128 | drain_pages(cpu); |
Christoph Lameter | 9f8f217 | 2008-02-04 22:29:11 -0800 | [diff] [blame] | 3129 | } |
| 3130 | |
Mel Gorman | 0ccce3b | 2017-02-24 14:56:32 -0800 | [diff] [blame] | 3131 | static void drain_local_pages_wq(struct work_struct *work) |
| 3132 | { |
Wei Yang | d9367bd | 2018-12-28 00:38:58 -0800 | [diff] [blame] | 3133 | struct pcpu_drain *drain; |
| 3134 | |
| 3135 | drain = container_of(work, struct pcpu_drain, work); |
| 3136 | |
Michal Hocko | a459eeb | 2017-02-24 14:56:35 -0800 | [diff] [blame] | 3137 | /* |
| 3138 | * drain_all_pages doesn't use proper cpu hotplug protection so |
| 3139 | * we can race with cpu offline when the WQ can move this from |
| 3140 | * a cpu pinned worker to an unbound one. We can operate on a different |
Ingo Molnar | f0953a1 | 2021-05-06 18:06:47 -0700 | [diff] [blame] | 3141 | * cpu which is alright but we also have to make sure to not move to |
Michal Hocko | a459eeb | 2017-02-24 14:56:35 -0800 | [diff] [blame] | 3142 | * a different one. |
| 3143 | */ |
Sebastian Andrzej Siewior | 9c25cbf | 2021-11-05 13:40:52 -0700 | [diff] [blame] | 3144 | migrate_disable(); |
Wei Yang | d9367bd | 2018-12-28 00:38:58 -0800 | [diff] [blame] | 3145 | drain_local_pages(drain->zone); |
Sebastian Andrzej Siewior | 9c25cbf | 2021-11-05 13:40:52 -0700 | [diff] [blame] | 3146 | migrate_enable(); |
Mel Gorman | 0ccce3b | 2017-02-24 14:56:32 -0800 | [diff] [blame] | 3147 | } |
| 3148 | |
Christoph Lameter | 9f8f217 | 2008-02-04 22:29:11 -0800 | [diff] [blame] | 3149 | /* |
Vlastimil Babka | ec6e8c7e | 2020-12-14 19:10:59 -0800 | [diff] [blame] | 3150 | * The implementation of drain_all_pages(), exposing an extra parameter to |
| 3151 | * drain on all cpus. |
Gilad Ben-Yossef | 7404649 | 2012-03-28 14:42:45 -0700 | [diff] [blame] | 3152 | * |
Vlastimil Babka | ec6e8c7e | 2020-12-14 19:10:59 -0800 | [diff] [blame] | 3153 | * drain_all_pages() is optimized to only execute on cpus where pcplists are |
| 3154 | * not empty. The check for non-emptiness can however race with a free to |
| 3155 | * pcplist that has not yet increased the pcp->count from 0 to 1. Callers |
| 3156 | * that need the guarantee that every CPU has drained can disable the |
| 3157 | * optimizing racy check. |
Christoph Lameter | 9f8f217 | 2008-02-04 22:29:11 -0800 | [diff] [blame] | 3158 | */ |
Zou Wei | 3b1f365 | 2020-12-14 19:11:12 -0800 | [diff] [blame] | 3159 | static void __drain_all_pages(struct zone *zone, bool force_all_cpus) |
Christoph Lameter | 9f8f217 | 2008-02-04 22:29:11 -0800 | [diff] [blame] | 3160 | { |
Gilad Ben-Yossef | 7404649 | 2012-03-28 14:42:45 -0700 | [diff] [blame] | 3161 | int cpu; |
Gilad Ben-Yossef | 7404649 | 2012-03-28 14:42:45 -0700 | [diff] [blame] | 3162 | |
| 3163 | /* |
Zhen Lei | 041711c | 2021-06-30 18:53:17 -0700 | [diff] [blame] | 3164 | * Allocate in the BSS so we won't require allocation in |
Gilad Ben-Yossef | 7404649 | 2012-03-28 14:42:45 -0700 | [diff] [blame] | 3165 | * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y |
| 3166 | */ |
| 3167 | static cpumask_t cpus_with_pcps; |
| 3168 | |
Michal Hocko | ce61287 | 2017-04-07 16:05:05 -0700 | [diff] [blame] | 3169 | /* |
| 3170 | * Make sure nobody triggers this path before mm_percpu_wq is fully |
| 3171 | * initialized. |
| 3172 | */ |
| 3173 | if (WARN_ON_ONCE(!mm_percpu_wq)) |
| 3174 | return; |
| 3175 | |
Mel Gorman | bd233f5 | 2017-02-24 14:56:56 -0800 | [diff] [blame] | 3176 | /* |
| 3177 | * Do not drain if one is already in progress unless it's specific to |
| 3178 | * a zone. Such callers are primarily CMA and memory hotplug and need |
| 3179 | * the drain to be complete when the call returns. |
| 3180 | */ |
| 3181 | if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { |
| 3182 | if (!zone) |
| 3183 | return; |
| 3184 | mutex_lock(&pcpu_drain_mutex); |
| 3185 | } |
Mel Gorman | 0ccce3b | 2017-02-24 14:56:32 -0800 | [diff] [blame] | 3186 | |
Gilad Ben-Yossef | 7404649 | 2012-03-28 14:42:45 -0700 | [diff] [blame] | 3187 | /* |
| 3188 | * We don't care about racing with CPU hotplug event |
| 3189 | * as offline notification will cause the notified |
| 3190 | * cpu to drain that CPU pcps and on_each_cpu_mask |
| 3191 | * disables preemption as part of its processing |
| 3192 | */ |
| 3193 | for_each_online_cpu(cpu) { |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 3194 | struct per_cpu_pages *pcp; |
Vlastimil Babka | 93481ff | 2014-12-10 15:43:01 -0800 | [diff] [blame] | 3195 | struct zone *z; |
Gilad Ben-Yossef | 7404649 | 2012-03-28 14:42:45 -0700 | [diff] [blame] | 3196 | bool has_pcps = false; |
Vlastimil Babka | 93481ff | 2014-12-10 15:43:01 -0800 | [diff] [blame] | 3197 | |
Vlastimil Babka | ec6e8c7e | 2020-12-14 19:10:59 -0800 | [diff] [blame] | 3198 | if (force_all_cpus) { |
| 3199 | /* |
| 3200 | * The pcp.count check is racy, some callers need a |
| 3201 | * guarantee that no cpu is missed. |
| 3202 | */ |
| 3203 | has_pcps = true; |
| 3204 | } else if (zone) { |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 3205 | pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); |
| 3206 | if (pcp->count) |
Gilad Ben-Yossef | 7404649 | 2012-03-28 14:42:45 -0700 | [diff] [blame] | 3207 | has_pcps = true; |
Vlastimil Babka | 93481ff | 2014-12-10 15:43:01 -0800 | [diff] [blame] | 3208 | } else { |
| 3209 | for_each_populated_zone(z) { |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 3210 | pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); |
| 3211 | if (pcp->count) { |
Vlastimil Babka | 93481ff | 2014-12-10 15:43:01 -0800 | [diff] [blame] | 3212 | has_pcps = true; |
| 3213 | break; |
| 3214 | } |
Gilad Ben-Yossef | 7404649 | 2012-03-28 14:42:45 -0700 | [diff] [blame] | 3215 | } |
| 3216 | } |
Vlastimil Babka | 93481ff | 2014-12-10 15:43:01 -0800 | [diff] [blame] | 3217 | |
Gilad Ben-Yossef | 7404649 | 2012-03-28 14:42:45 -0700 | [diff] [blame] | 3218 | if (has_pcps) |
| 3219 | cpumask_set_cpu(cpu, &cpus_with_pcps); |
| 3220 | else |
| 3221 | cpumask_clear_cpu(cpu, &cpus_with_pcps); |
| 3222 | } |
Mel Gorman | 0ccce3b | 2017-02-24 14:56:32 -0800 | [diff] [blame] | 3223 | |
Mel Gorman | bd233f5 | 2017-02-24 14:56:56 -0800 | [diff] [blame] | 3224 | for_each_cpu(cpu, &cpus_with_pcps) { |
Wei Yang | d9367bd | 2018-12-28 00:38:58 -0800 | [diff] [blame] | 3225 | struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu); |
| 3226 | |
| 3227 | drain->zone = zone; |
| 3228 | INIT_WORK(&drain->work, drain_local_pages_wq); |
| 3229 | queue_work_on(cpu, mm_percpu_wq, &drain->work); |
Mel Gorman | 0ccce3b | 2017-02-24 14:56:32 -0800 | [diff] [blame] | 3230 | } |
Mel Gorman | bd233f5 | 2017-02-24 14:56:56 -0800 | [diff] [blame] | 3231 | for_each_cpu(cpu, &cpus_with_pcps) |
Wei Yang | d9367bd | 2018-12-28 00:38:58 -0800 | [diff] [blame] | 3232 | flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work); |
Mel Gorman | bd233f5 | 2017-02-24 14:56:56 -0800 | [diff] [blame] | 3233 | |
| 3234 | mutex_unlock(&pcpu_drain_mutex); |
Christoph Lameter | 9f8f217 | 2008-02-04 22:29:11 -0800 | [diff] [blame] | 3235 | } |
| 3236 | |
Vlastimil Babka | ec6e8c7e | 2020-12-14 19:10:59 -0800 | [diff] [blame] | 3237 | /* |
| 3238 | * Spill all the per-cpu pages from all CPUs back into the buddy allocator. |
| 3239 | * |
| 3240 | * When zone parameter is non-NULL, spill just the single zone's pages. |
| 3241 | * |
| 3242 | * Note that this can be extremely slow as the draining happens in a workqueue. |
| 3243 | */ |
| 3244 | void drain_all_pages(struct zone *zone) |
| 3245 | { |
| 3246 | __drain_all_pages(zone, false); |
| 3247 | } |
| 3248 | |
Rafael J. Wysocki | 296699d | 2007-07-29 23:27:18 +0200 | [diff] [blame] | 3249 | #ifdef CONFIG_HIBERNATION |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3250 | |
Chen Yu | 556b969 | 2017-08-25 15:55:30 -0700 | [diff] [blame] | 3251 | /* |
| 3252 | * Touch the watchdog for every WD_PAGE_COUNT pages. |
| 3253 | */ |
| 3254 | #define WD_PAGE_COUNT (128*1024) |
| 3255 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3256 | void mark_free_pages(struct zone *zone) |
| 3257 | { |
Chen Yu | 556b969 | 2017-08-25 15:55:30 -0700 | [diff] [blame] | 3258 | unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT; |
Rafael J. Wysocki | f623f0d | 2006-09-25 23:32:49 -0700 | [diff] [blame] | 3259 | unsigned long flags; |
Mel Gorman | 7aeb09f | 2014-06-04 16:10:21 -0700 | [diff] [blame] | 3260 | unsigned int order, t; |
Geliang Tang | 86760a2 | 2016-01-14 15:20:33 -0800 | [diff] [blame] | 3261 | struct page *page; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3262 | |
Xishi Qiu | 8080fc0 | 2013-09-11 14:21:45 -0700 | [diff] [blame] | 3263 | if (zone_is_empty(zone)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3264 | return; |
| 3265 | |
| 3266 | spin_lock_irqsave(&zone->lock, flags); |
Rafael J. Wysocki | f623f0d | 2006-09-25 23:32:49 -0700 | [diff] [blame] | 3267 | |
Cody P Schafer | 108bcc9 | 2013-02-22 16:35:23 -0800 | [diff] [blame] | 3268 | max_zone_pfn = zone_end_pfn(zone); |
Rafael J. Wysocki | f623f0d | 2006-09-25 23:32:49 -0700 | [diff] [blame] | 3269 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
| 3270 | if (pfn_valid(pfn)) { |
Geliang Tang | 86760a2 | 2016-01-14 15:20:33 -0800 | [diff] [blame] | 3271 | page = pfn_to_page(pfn); |
Joonsoo Kim | ba6b097 | 2016-05-19 17:12:16 -0700 | [diff] [blame] | 3272 | |
Chen Yu | 556b969 | 2017-08-25 15:55:30 -0700 | [diff] [blame] | 3273 | if (!--page_count) { |
| 3274 | touch_nmi_watchdog(); |
| 3275 | page_count = WD_PAGE_COUNT; |
| 3276 | } |
| 3277 | |
Joonsoo Kim | ba6b097 | 2016-05-19 17:12:16 -0700 | [diff] [blame] | 3278 | if (page_zone(page) != zone) |
| 3279 | continue; |
| 3280 | |
Rafael J. Wysocki | 7be9823 | 2007-05-06 14:50:42 -0700 | [diff] [blame] | 3281 | if (!swsusp_page_is_forbidden(page)) |
| 3282 | swsusp_unset_page_free(page); |
Rafael J. Wysocki | f623f0d | 2006-09-25 23:32:49 -0700 | [diff] [blame] | 3283 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3284 | |
Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 3285 | for_each_migratetype_order(order, t) { |
Geliang Tang | 86760a2 | 2016-01-14 15:20:33 -0800 | [diff] [blame] | 3286 | list_for_each_entry(page, |
| 3287 | &zone->free_area[order].free_list[t], lru) { |
Rafael J. Wysocki | f623f0d | 2006-09-25 23:32:49 -0700 | [diff] [blame] | 3288 | unsigned long i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3289 | |
Geliang Tang | 86760a2 | 2016-01-14 15:20:33 -0800 | [diff] [blame] | 3290 | pfn = page_to_pfn(page); |
Chen Yu | 556b969 | 2017-08-25 15:55:30 -0700 | [diff] [blame] | 3291 | for (i = 0; i < (1UL << order); i++) { |
| 3292 | if (!--page_count) { |
| 3293 | touch_nmi_watchdog(); |
| 3294 | page_count = WD_PAGE_COUNT; |
| 3295 | } |
Rafael J. Wysocki | 7be9823 | 2007-05-06 14:50:42 -0700 | [diff] [blame] | 3296 | swsusp_set_page_free(pfn_to_page(pfn + i)); |
Chen Yu | 556b969 | 2017-08-25 15:55:30 -0700 | [diff] [blame] | 3297 | } |
Rafael J. Wysocki | f623f0d | 2006-09-25 23:32:49 -0700 | [diff] [blame] | 3298 | } |
Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 3299 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3300 | spin_unlock_irqrestore(&zone->lock, flags); |
| 3301 | } |
Mel Gorman | e2c55dc | 2007-10-16 01:25:50 -0700 | [diff] [blame] | 3302 | #endif /* CONFIG_PM */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3303 | |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3304 | static bool free_unref_page_prepare(struct page *page, unsigned long pfn, |
| 3305 | unsigned int order) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3306 | { |
Mel Gorman | 5f8dcc2 | 2009-09-21 17:03:19 -0700 | [diff] [blame] | 3307 | int migratetype; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3308 | |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3309 | if (!free_pcp_prepare(page, order)) |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3310 | return false; |
Hugh Dickins | 689bceb | 2005-11-21 21:32:20 -0800 | [diff] [blame] | 3311 | |
Mel Gorman | dc4b0ca | 2014-06-04 16:10:17 -0700 | [diff] [blame] | 3312 | migratetype = get_pfnblock_migratetype(page, pfn); |
Vlastimil Babka | bb14c2c | 2015-09-08 15:01:25 -0700 | [diff] [blame] | 3313 | set_pcppage_migratetype(page, migratetype); |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3314 | return true; |
| 3315 | } |
| 3316 | |
Mel Gorman | 3b12e7e | 2021-06-28 19:42:18 -0700 | [diff] [blame] | 3317 | static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch) |
| 3318 | { |
| 3319 | int min_nr_free, max_nr_free; |
| 3320 | |
| 3321 | /* Check for PCP disabled or boot pageset */ |
| 3322 | if (unlikely(high < batch)) |
| 3323 | return 1; |
| 3324 | |
| 3325 | /* Leave at least pcp->batch pages on the list */ |
| 3326 | min_nr_free = batch; |
| 3327 | max_nr_free = high - batch; |
| 3328 | |
| 3329 | /* |
| 3330 | * Double the number of pages freed each time there is subsequent |
| 3331 | * freeing of pages without any allocation. |
| 3332 | */ |
| 3333 | batch <<= pcp->free_factor; |
| 3334 | if (batch < max_nr_free) |
| 3335 | pcp->free_factor++; |
| 3336 | batch = clamp(batch, min_nr_free, max_nr_free); |
| 3337 | |
| 3338 | return batch; |
| 3339 | } |
| 3340 | |
Mel Gorman | c49c2c4 | 2021-06-28 19:42:21 -0700 | [diff] [blame] | 3341 | static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone) |
| 3342 | { |
| 3343 | int high = READ_ONCE(pcp->high); |
| 3344 | |
| 3345 | if (unlikely(!high)) |
| 3346 | return 0; |
| 3347 | |
| 3348 | if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) |
| 3349 | return high; |
| 3350 | |
| 3351 | /* |
| 3352 | * If reclaim is active, limit the number of pages that can be |
| 3353 | * stored on pcp lists |
| 3354 | */ |
| 3355 | return min(READ_ONCE(pcp->batch) << 2, high); |
| 3356 | } |
| 3357 | |
Mel Gorman | df1acc8 | 2021-06-28 19:42:00 -0700 | [diff] [blame] | 3358 | static void free_unref_page_commit(struct page *page, unsigned long pfn, |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3359 | int migratetype, unsigned int order) |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3360 | { |
| 3361 | struct zone *zone = page_zone(page); |
| 3362 | struct per_cpu_pages *pcp; |
Mel Gorman | 3b12e7e | 2021-06-28 19:42:18 -0700 | [diff] [blame] | 3363 | int high; |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3364 | int pindex; |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3365 | |
Mel Gorman | d34b073 | 2017-04-20 14:37:43 -0700 | [diff] [blame] | 3366 | __count_vm_event(PGFREE); |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 3367 | pcp = this_cpu_ptr(zone->per_cpu_pageset); |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3368 | pindex = order_to_pindex(migratetype, order); |
| 3369 | list_add(&page->lru, &pcp->lists[pindex]); |
| 3370 | pcp->count += 1 << order; |
Mel Gorman | c49c2c4 | 2021-06-28 19:42:21 -0700 | [diff] [blame] | 3371 | high = nr_pcp_high(pcp, zone); |
Mel Gorman | 3b12e7e | 2021-06-28 19:42:18 -0700 | [diff] [blame] | 3372 | if (pcp->count >= high) { |
| 3373 | int batch = READ_ONCE(pcp->batch); |
| 3374 | |
| 3375 | free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch), pcp); |
| 3376 | } |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3377 | } |
Mel Gorman | 5f8dcc2 | 2009-09-21 17:03:19 -0700 | [diff] [blame] | 3378 | |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3379 | /* |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3380 | * Free a pcp page |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3381 | */ |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3382 | void free_unref_page(struct page *page, unsigned int order) |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3383 | { |
| 3384 | unsigned long flags; |
| 3385 | unsigned long pfn = page_to_pfn(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3386 | int migratetype; |
| 3387 | |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3388 | if (!free_unref_page_prepare(page, pfn, order)) |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3389 | return; |
Thomas Gleixner | 3ac7fe5 | 2008-04-30 00:55:01 -0700 | [diff] [blame] | 3390 | |
Nick Piggin | 9858db5 | 2006-10-11 01:21:30 -0700 | [diff] [blame] | 3391 | /* |
Thomas Gleixner | 3ac7fe5 | 2008-04-30 00:55:01 -0700 | [diff] [blame] | 3392 | * We only track unmovable, reclaimable and movable on pcp lists. |
Mel Gorman | df1acc8 | 2021-06-28 19:42:00 -0700 | [diff] [blame] | 3393 | * Place ISOLATE pages on the isolated list because they are being |
Nick Piggin | dafb136 | 2006-10-11 01:21:30 -0700 | [diff] [blame] | 3394 | * offlined but treat HIGHATOMIC as movable pages so we can get those |
Hugh Dickins | 689bceb | 2005-11-21 21:32:20 -0800 | [diff] [blame] | 3395 | * areas back if necessary. Otherwise, we may have to free |
| 3396 | * excessively into the page allocator |
Christoph Lameter | 3dfa572 | 2008-02-04 22:29:19 -0800 | [diff] [blame] | 3397 | */ |
Mel Gorman | df1acc8 | 2021-06-28 19:42:00 -0700 | [diff] [blame] | 3398 | migratetype = get_pcppage_migratetype(page); |
| 3399 | if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3400 | if (unlikely(is_migrate_isolate(migratetype))) { |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3401 | free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE); |
Christoph Lameter | f8891e5 | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 3402 | return; |
Mel Gorman | da456f1 | 2009-06-16 15:32:08 -0700 | [diff] [blame] | 3403 | } |
Christoph Lameter | 3dfa572 | 2008-02-04 22:29:19 -0800 | [diff] [blame] | 3404 | migratetype = MIGRATE_MOVABLE; |
| 3405 | } |
| 3406 | |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 3407 | local_lock_irqsave(&pagesets.lock, flags); |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3408 | free_unref_page_commit(page, pfn, migratetype, order); |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 3409 | local_unlock_irqrestore(&pagesets.lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3410 | } |
| 3411 | |
Nick Piggin | 8dfcc9b | 2006-03-22 00:08:05 -0800 | [diff] [blame] | 3412 | /* |
Konstantin Khlebnikov | cc59850 | 2012-01-10 15:07:04 -0800 | [diff] [blame] | 3413 | * Free a list of 0-order pages |
| 3414 | */ |
Mel Gorman | 2d4894b | 2017-11-15 17:37:59 -0800 | [diff] [blame] | 3415 | void free_unref_page_list(struct list_head *list) |
Konstantin Khlebnikov | cc59850 | 2012-01-10 15:07:04 -0800 | [diff] [blame] | 3416 | { |
| 3417 | struct page *page, *next; |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3418 | unsigned long flags, pfn; |
Lucas Stach | c24ad77 | 2017-12-14 15:32:55 -0800 | [diff] [blame] | 3419 | int batch_count = 0; |
Mel Gorman | df1acc8 | 2021-06-28 19:42:00 -0700 | [diff] [blame] | 3420 | int migratetype; |
Konstantin Khlebnikov | cc59850 | 2012-01-10 15:07:04 -0800 | [diff] [blame] | 3421 | |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3422 | /* Prepare pages for freeing */ |
Konstantin Khlebnikov | cc59850 | 2012-01-10 15:07:04 -0800 | [diff] [blame] | 3423 | list_for_each_entry_safe(page, next, list, lru) { |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3424 | pfn = page_to_pfn(page); |
Miaohe Lin | 053cfda | 2021-09-08 18:10:11 -0700 | [diff] [blame] | 3425 | if (!free_unref_page_prepare(page, pfn, 0)) { |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3426 | list_del(&page->lru); |
Miaohe Lin | 053cfda | 2021-09-08 18:10:11 -0700 | [diff] [blame] | 3427 | continue; |
| 3428 | } |
Mel Gorman | df1acc8 | 2021-06-28 19:42:00 -0700 | [diff] [blame] | 3429 | |
| 3430 | /* |
| 3431 | * Free isolated pages directly to the allocator, see |
| 3432 | * comment in free_unref_page. |
| 3433 | */ |
| 3434 | migratetype = get_pcppage_migratetype(page); |
Doug Berger | 47aef60 | 2021-08-19 19:04:12 -0700 | [diff] [blame] | 3435 | if (unlikely(is_migrate_isolate(migratetype))) { |
| 3436 | list_del(&page->lru); |
| 3437 | free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE); |
| 3438 | continue; |
Mel Gorman | df1acc8 | 2021-06-28 19:42:00 -0700 | [diff] [blame] | 3439 | } |
| 3440 | |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3441 | set_page_private(page, pfn); |
Konstantin Khlebnikov | cc59850 | 2012-01-10 15:07:04 -0800 | [diff] [blame] | 3442 | } |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3443 | |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 3444 | local_lock_irqsave(&pagesets.lock, flags); |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3445 | list_for_each_entry_safe(page, next, list, lru) { |
Mel Gorman | df1acc8 | 2021-06-28 19:42:00 -0700 | [diff] [blame] | 3446 | pfn = page_private(page); |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3447 | set_page_private(page, 0); |
Doug Berger | 47aef60 | 2021-08-19 19:04:12 -0700 | [diff] [blame] | 3448 | |
| 3449 | /* |
| 3450 | * Non-isolated types over MIGRATE_PCPTYPES get added |
| 3451 | * to the MIGRATE_MOVABLE pcp list. |
| 3452 | */ |
Mel Gorman | df1acc8 | 2021-06-28 19:42:00 -0700 | [diff] [blame] | 3453 | migratetype = get_pcppage_migratetype(page); |
Doug Berger | 47aef60 | 2021-08-19 19:04:12 -0700 | [diff] [blame] | 3454 | if (unlikely(migratetype >= MIGRATE_PCPTYPES)) |
| 3455 | migratetype = MIGRATE_MOVABLE; |
| 3456 | |
Mel Gorman | 2d4894b | 2017-11-15 17:37:59 -0800 | [diff] [blame] | 3457 | trace_mm_page_free_batched(page); |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3458 | free_unref_page_commit(page, pfn, migratetype, 0); |
Lucas Stach | c24ad77 | 2017-12-14 15:32:55 -0800 | [diff] [blame] | 3459 | |
| 3460 | /* |
| 3461 | * Guard against excessive IRQ disabled times when we get |
| 3462 | * a large list of pages to free. |
| 3463 | */ |
| 3464 | if (++batch_count == SWAP_CLUSTER_MAX) { |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 3465 | local_unlock_irqrestore(&pagesets.lock, flags); |
Lucas Stach | c24ad77 | 2017-12-14 15:32:55 -0800 | [diff] [blame] | 3466 | batch_count = 0; |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 3467 | local_lock_irqsave(&pagesets.lock, flags); |
Lucas Stach | c24ad77 | 2017-12-14 15:32:55 -0800 | [diff] [blame] | 3468 | } |
Mel Gorman | 9cca35d4 | 2017-11-15 17:37:37 -0800 | [diff] [blame] | 3469 | } |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 3470 | local_unlock_irqrestore(&pagesets.lock, flags); |
Konstantin Khlebnikov | cc59850 | 2012-01-10 15:07:04 -0800 | [diff] [blame] | 3471 | } |
| 3472 | |
| 3473 | /* |
Nick Piggin | 8dfcc9b | 2006-03-22 00:08:05 -0800 | [diff] [blame] | 3474 | * split_page takes a non-compound higher-order page, and splits it into |
| 3475 | * n (1<<order) sub-pages: page[0..n] |
| 3476 | * Each sub-page must be freed individually. |
| 3477 | * |
| 3478 | * Note: this is probably too low level an operation for use in drivers. |
| 3479 | * Please consult with lkml before using this in your driver. |
| 3480 | */ |
| 3481 | void split_page(struct page *page, unsigned int order) |
| 3482 | { |
| 3483 | int i; |
| 3484 | |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 3485 | VM_BUG_ON_PAGE(PageCompound(page), page); |
| 3486 | VM_BUG_ON_PAGE(!page_count(page), page); |
Vegard Nossum | b1eeab6 | 2008-11-25 16:55:53 +0100 | [diff] [blame] | 3487 | |
Joonsoo Kim | a9627bc | 2016-07-26 15:23:49 -0700 | [diff] [blame] | 3488 | for (i = 1; i < (1 << order); i++) |
Nick Piggin | 7835e98 | 2006-03-22 00:08:40 -0800 | [diff] [blame] | 3489 | set_page_refcounted(page + i); |
Matthew Wilcox (Oracle) | 8fb156c | 2020-10-15 20:05:29 -0700 | [diff] [blame] | 3490 | split_page_owner(page, 1 << order); |
Zhou Guanghui | e1baddf | 2021-03-12 21:08:33 -0800 | [diff] [blame] | 3491 | split_page_memcg(page, 1 << order); |
Nick Piggin | 8dfcc9b | 2006-03-22 00:08:05 -0800 | [diff] [blame] | 3492 | } |
K. Y. Srinivasan | 5853ff2 | 2013-03-25 15:47:38 -0700 | [diff] [blame] | 3493 | EXPORT_SYMBOL_GPL(split_page); |
Nick Piggin | 8dfcc9b | 2006-03-22 00:08:05 -0800 | [diff] [blame] | 3494 | |
Joonsoo Kim | 3c60509 | 2014-11-13 15:19:21 -0800 | [diff] [blame] | 3495 | int __isolate_free_page(struct page *page, unsigned int order) |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 3496 | { |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 3497 | unsigned long watermark; |
| 3498 | struct zone *zone; |
Bartlomiej Zolnierkiewicz | 2139cbe | 2012-10-08 16:32:00 -0700 | [diff] [blame] | 3499 | int mt; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 3500 | |
| 3501 | BUG_ON(!PageBuddy(page)); |
| 3502 | |
| 3503 | zone = page_zone(page); |
Marek Szyprowski | 2e30abd | 2012-12-11 16:02:57 -0800 | [diff] [blame] | 3504 | mt = get_pageblock_migratetype(page); |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 3505 | |
Minchan Kim | 194159f | 2013-02-22 16:33:58 -0800 | [diff] [blame] | 3506 | if (!is_migrate_isolate(mt)) { |
Vlastimil Babka | 8348faf | 2016-10-07 16:58:00 -0700 | [diff] [blame] | 3507 | /* |
| 3508 | * Obey watermarks as if the page was being allocated. We can |
| 3509 | * emulate a high-order watermark check with a raised order-0 |
| 3510 | * watermark, because we already know our high-order page |
| 3511 | * exists. |
| 3512 | */ |
Mel Gorman | fd1444b | 2019-03-05 15:44:50 -0800 | [diff] [blame] | 3513 | watermark = zone->_watermark[WMARK_MIN] + (1UL << order); |
Joonsoo Kim | d883c6c | 2018-05-23 10:18:21 +0900 | [diff] [blame] | 3514 | if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) |
Marek Szyprowski | 2e30abd | 2012-12-11 16:02:57 -0800 | [diff] [blame] | 3515 | return 0; |
| 3516 | |
Mel Gorman | 8fb74b9 | 2013-01-11 14:32:16 -0800 | [diff] [blame] | 3517 | __mod_zone_freepage_state(zone, -(1UL << order), mt); |
Marek Szyprowski | 2e30abd | 2012-12-11 16:02:57 -0800 | [diff] [blame] | 3518 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 3519 | |
| 3520 | /* Remove page from free list */ |
Dan Williams | b03641a | 2019-05-14 15:41:32 -0700 | [diff] [blame] | 3521 | |
Alexander Duyck | 6ab0136 | 2020-04-06 20:04:49 -0700 | [diff] [blame] | 3522 | del_page_from_free_list(page, zone, order); |
Bartlomiej Zolnierkiewicz | 2139cbe | 2012-10-08 16:32:00 -0700 | [diff] [blame] | 3523 | |
zhong jiang | 400bc7f | 2016-07-28 15:45:07 -0700 | [diff] [blame] | 3524 | /* |
| 3525 | * Set the pageblock if the isolated page is at least half of a |
| 3526 | * pageblock |
| 3527 | */ |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 3528 | if (order >= pageblock_order - 1) { |
| 3529 | struct page *endpage = page + (1 << order) - 1; |
Michal Nazarewicz | 47118af | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 3530 | for (; page < endpage; page += pageblock_nr_pages) { |
| 3531 | int mt = get_pageblock_migratetype(page); |
Minchan Kim | 88ed365 | 2016-12-12 16:42:05 -0800 | [diff] [blame] | 3532 | if (!is_migrate_isolate(mt) && !is_migrate_cma(mt) |
Xishi Qiu | a6ffdc0 | 2017-05-03 14:52:52 -0700 | [diff] [blame] | 3533 | && !is_migrate_highatomic(mt)) |
Michal Nazarewicz | 47118af | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 3534 | set_pageblock_migratetype(page, |
| 3535 | MIGRATE_MOVABLE); |
| 3536 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 3537 | } |
| 3538 | |
Joonsoo Kim | f3a14ce | 2015-07-17 16:24:15 -0700 | [diff] [blame] | 3539 | |
Mel Gorman | 8fb74b9 | 2013-01-11 14:32:16 -0800 | [diff] [blame] | 3540 | return 1UL << order; |
Mel Gorman | 1fb3f8c | 2012-10-08 16:29:12 -0700 | [diff] [blame] | 3541 | } |
| 3542 | |
Alexander Duyck | 624f58d | 2020-04-06 20:04:53 -0700 | [diff] [blame] | 3543 | /** |
| 3544 | * __putback_isolated_page - Return a now-isolated page back where we got it |
| 3545 | * @page: Page that was isolated |
| 3546 | * @order: Order of the isolated page |
Randy Dunlap | e6a0a7a | 2020-04-10 14:32:29 -0700 | [diff] [blame] | 3547 | * @mt: The page's pageblock's migratetype |
Alexander Duyck | 624f58d | 2020-04-06 20:04:53 -0700 | [diff] [blame] | 3548 | * |
| 3549 | * This function is meant to return a page pulled from the free lists via |
| 3550 | * __isolate_free_page back to the free lists they were pulled from. |
| 3551 | */ |
| 3552 | void __putback_isolated_page(struct page *page, unsigned int order, int mt) |
| 3553 | { |
| 3554 | struct zone *zone = page_zone(page); |
| 3555 | |
| 3556 | /* zone lock should be held when this function is called */ |
| 3557 | lockdep_assert_held(&zone->lock); |
| 3558 | |
| 3559 | /* Return isolated page to tail of freelist. */ |
David Hildenbrand | f04a5d5 | 2020-10-15 20:09:20 -0700 | [diff] [blame] | 3560 | __free_one_page(page, page_to_pfn(page), zone, order, mt, |
David Hildenbrand | 47b6a24a2 | 2020-10-15 20:09:26 -0700 | [diff] [blame] | 3561 | FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); |
Alexander Duyck | 624f58d | 2020-04-06 20:04:53 -0700 | [diff] [blame] | 3562 | } |
| 3563 | |
Mel Gorman | 1fb3f8c | 2012-10-08 16:29:12 -0700 | [diff] [blame] | 3564 | /* |
Mel Gorman | 060e741 | 2016-05-19 17:13:27 -0700 | [diff] [blame] | 3565 | * Update NUMA hit/miss statistics |
| 3566 | * |
| 3567 | * Must be called with interrupts disabled. |
Mel Gorman | 060e741 | 2016-05-19 17:13:27 -0700 | [diff] [blame] | 3568 | */ |
Mel Gorman | 3e23060 | 2021-06-28 19:41:50 -0700 | [diff] [blame] | 3569 | static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, |
| 3570 | long nr_account) |
Mel Gorman | 060e741 | 2016-05-19 17:13:27 -0700 | [diff] [blame] | 3571 | { |
| 3572 | #ifdef CONFIG_NUMA |
Kemi Wang | 3a321d2 | 2017-09-08 16:12:48 -0700 | [diff] [blame] | 3573 | enum numa_stat_item local_stat = NUMA_LOCAL; |
Mel Gorman | 060e741 | 2016-05-19 17:13:27 -0700 | [diff] [blame] | 3574 | |
Kemi Wang | 4518085 | 2017-11-15 17:38:22 -0800 | [diff] [blame] | 3575 | /* skip numa counters update if numa stats is disabled */ |
| 3576 | if (!static_branch_likely(&vm_numa_stat_key)) |
| 3577 | return; |
| 3578 | |
Pavel Tatashin | c1093b7 | 2018-08-21 21:53:32 -0700 | [diff] [blame] | 3579 | if (zone_to_nid(z) != numa_node_id()) |
Mel Gorman | 060e741 | 2016-05-19 17:13:27 -0700 | [diff] [blame] | 3580 | local_stat = NUMA_OTHER; |
Mel Gorman | 060e741 | 2016-05-19 17:13:27 -0700 | [diff] [blame] | 3581 | |
Pavel Tatashin | c1093b7 | 2018-08-21 21:53:32 -0700 | [diff] [blame] | 3582 | if (zone_to_nid(z) == zone_to_nid(preferred_zone)) |
Mel Gorman | 3e23060 | 2021-06-28 19:41:50 -0700 | [diff] [blame] | 3583 | __count_numa_events(z, NUMA_HIT, nr_account); |
Michal Hocko | 2df2663 | 2017-01-10 16:57:39 -0800 | [diff] [blame] | 3584 | else { |
Mel Gorman | 3e23060 | 2021-06-28 19:41:50 -0700 | [diff] [blame] | 3585 | __count_numa_events(z, NUMA_MISS, nr_account); |
| 3586 | __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); |
Mel Gorman | 060e741 | 2016-05-19 17:13:27 -0700 | [diff] [blame] | 3587 | } |
Mel Gorman | 3e23060 | 2021-06-28 19:41:50 -0700 | [diff] [blame] | 3588 | __count_numa_events(z, local_stat, nr_account); |
Mel Gorman | 060e741 | 2016-05-19 17:13:27 -0700 | [diff] [blame] | 3589 | #endif |
| 3590 | } |
| 3591 | |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3592 | /* Remove page from the per-cpu list, caller must protect the list */ |
Jesper Dangaard Brouer | 3b82201 | 2021-04-29 23:01:55 -0700 | [diff] [blame] | 3593 | static inline |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3594 | struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, |
| 3595 | int migratetype, |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 3596 | unsigned int alloc_flags, |
Mel Gorman | 453f85d | 2017-11-15 17:38:03 -0800 | [diff] [blame] | 3597 | struct per_cpu_pages *pcp, |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3598 | struct list_head *list) |
| 3599 | { |
| 3600 | struct page *page; |
| 3601 | |
| 3602 | do { |
| 3603 | if (list_empty(list)) { |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3604 | int batch = READ_ONCE(pcp->batch); |
| 3605 | int alloced; |
| 3606 | |
| 3607 | /* |
| 3608 | * Scale batch relative to order if batch implies |
| 3609 | * free pages can be stored on the PCP. Batch can |
| 3610 | * be 1 for small zones or for boot pagesets which |
| 3611 | * should never store free pages as the pages may |
| 3612 | * belong to arbitrary zones. |
| 3613 | */ |
| 3614 | if (batch > 1) |
| 3615 | batch = max(batch >> order, 2); |
| 3616 | alloced = rmqueue_bulk(zone, order, |
| 3617 | batch, list, |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 3618 | migratetype, alloc_flags); |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3619 | |
| 3620 | pcp->count += alloced << order; |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3621 | if (unlikely(list_empty(list))) |
| 3622 | return NULL; |
| 3623 | } |
| 3624 | |
Mel Gorman | 453f85d | 2017-11-15 17:38:03 -0800 | [diff] [blame] | 3625 | page = list_first_entry(list, struct page, lru); |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3626 | list_del(&page->lru); |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3627 | pcp->count -= 1 << order; |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3628 | } while (check_new_pcp(page)); |
| 3629 | |
| 3630 | return page; |
| 3631 | } |
| 3632 | |
| 3633 | /* Lock and remove page from the per-cpu list */ |
| 3634 | static struct page *rmqueue_pcplist(struct zone *preferred_zone, |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3635 | struct zone *zone, unsigned int order, |
| 3636 | gfp_t gfp_flags, int migratetype, |
| 3637 | unsigned int alloc_flags) |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3638 | { |
| 3639 | struct per_cpu_pages *pcp; |
| 3640 | struct list_head *list; |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3641 | struct page *page; |
Mel Gorman | d34b073 | 2017-04-20 14:37:43 -0700 | [diff] [blame] | 3642 | unsigned long flags; |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3643 | |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 3644 | local_lock_irqsave(&pagesets.lock, flags); |
Mel Gorman | 3b12e7e | 2021-06-28 19:42:18 -0700 | [diff] [blame] | 3645 | |
| 3646 | /* |
| 3647 | * On allocation, reduce the number of pages that are batch freed. |
| 3648 | * See nr_pcp_free() where free_factor is increased for subsequent |
| 3649 | * frees. |
| 3650 | */ |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 3651 | pcp = this_cpu_ptr(zone->per_cpu_pageset); |
Mel Gorman | 3b12e7e | 2021-06-28 19:42:18 -0700 | [diff] [blame] | 3652 | pcp->free_factor >>= 1; |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3653 | list = &pcp->lists[order_to_pindex(migratetype, order)]; |
| 3654 | page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); |
Mel Gorman | 43c95bc | 2021-06-28 19:41:54 -0700 | [diff] [blame] | 3655 | local_unlock_irqrestore(&pagesets.lock, flags); |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3656 | if (page) { |
Yafang Shao | 1c52e6d | 2019-05-13 17:22:40 -0700 | [diff] [blame] | 3657 | __count_zid_vm_events(PGALLOC, page_zonenum(page), 1); |
Mel Gorman | 3e23060 | 2021-06-28 19:41:50 -0700 | [diff] [blame] | 3658 | zone_statistics(preferred_zone, zone, 1); |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3659 | } |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3660 | return page; |
| 3661 | } |
| 3662 | |
Mel Gorman | 060e741 | 2016-05-19 17:13:27 -0700 | [diff] [blame] | 3663 | /* |
Vlastimil Babka | 7537919 | 2015-02-11 15:25:38 -0800 | [diff] [blame] | 3664 | * Allocate a page from the given zone. Use pcplists for order-0 allocations. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3665 | */ |
Mel Gorman | 0a15c3e | 2009-06-16 15:32:05 -0700 | [diff] [blame] | 3666 | static inline |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3667 | struct page *rmqueue(struct zone *preferred_zone, |
Mel Gorman | 7aeb09f | 2014-06-04 16:10:21 -0700 | [diff] [blame] | 3668 | struct zone *zone, unsigned int order, |
Mel Gorman | c603844 | 2016-05-19 17:13:38 -0700 | [diff] [blame] | 3669 | gfp_t gfp_flags, unsigned int alloc_flags, |
| 3670 | int migratetype) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3671 | { |
| 3672 | unsigned long flags; |
Hugh Dickins | 689bceb | 2005-11-21 21:32:20 -0800 | [diff] [blame] | 3673 | struct page *page; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3674 | |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3675 | if (likely(pcp_allowed_order(order))) { |
Joonsoo Kim | 1d91df8 | 2020-10-02 22:21:45 -0700 | [diff] [blame] | 3676 | /* |
| 3677 | * MIGRATE_MOVABLE pcplist could have the pages on CMA area and |
| 3678 | * we need to skip it when CMA area isn't allowed. |
| 3679 | */ |
| 3680 | if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA || |
| 3681 | migratetype != MIGRATE_MOVABLE) { |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 3682 | page = rmqueue_pcplist(preferred_zone, zone, order, |
| 3683 | gfp_flags, migratetype, alloc_flags); |
Joonsoo Kim | 1d91df8 | 2020-10-02 22:21:45 -0700 | [diff] [blame] | 3684 | goto out; |
| 3685 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3686 | } |
| 3687 | |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3688 | /* |
| 3689 | * We most definitely don't want callers attempting to |
| 3690 | * allocate greater than order-1 page units with __GFP_NOFAIL. |
| 3691 | */ |
| 3692 | WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); |
| 3693 | spin_lock_irqsave(&zone->lock, flags); |
| 3694 | |
| 3695 | do { |
| 3696 | page = NULL; |
Joonsoo Kim | 1d91df8 | 2020-10-02 22:21:45 -0700 | [diff] [blame] | 3697 | /* |
| 3698 | * order-0 request can reach here when the pcplist is skipped |
| 3699 | * due to non-CMA allocation context. HIGHATOMIC area is |
| 3700 | * reserved for high-order atomic allocation, so order-0 |
| 3701 | * request should skip it. |
| 3702 | */ |
| 3703 | if (order > 0 && alloc_flags & ALLOC_HARDER) { |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3704 | page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); |
| 3705 | if (page) |
| 3706 | trace_mm_page_alloc_zone_locked(page, order, migratetype); |
| 3707 | } |
| 3708 | if (!page) |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 3709 | page = __rmqueue(zone, order, migratetype, alloc_flags); |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3710 | } while (page && check_new_pages(page, order)); |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3711 | if (!page) |
| 3712 | goto failed; |
Mel Gorman | 43c95bc | 2021-06-28 19:41:54 -0700 | [diff] [blame] | 3713 | |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3714 | __mod_zone_freepage_state(zone, -(1 << order), |
| 3715 | get_pcppage_migratetype(page)); |
Mel Gorman | 43c95bc | 2021-06-28 19:41:54 -0700 | [diff] [blame] | 3716 | spin_unlock_irqrestore(&zone->lock, flags); |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3717 | |
Mel Gorman | 16709d1 | 2016-07-28 15:46:56 -0700 | [diff] [blame] | 3718 | __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); |
Mel Gorman | 3e23060 | 2021-06-28 19:41:50 -0700 | [diff] [blame] | 3719 | zone_statistics(preferred_zone, zone, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3720 | |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3721 | out: |
Mel Gorman | 73444bc | 2019-01-08 15:23:39 -0800 | [diff] [blame] | 3722 | /* Separate test+clear to avoid unnecessary atomics */ |
| 3723 | if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { |
| 3724 | clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); |
| 3725 | wakeup_kswapd(zone, 0, 0, zone_idx(zone)); |
| 3726 | } |
| 3727 | |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 3728 | VM_BUG_ON_PAGE(page && bad_range(zone, page), page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3729 | return page; |
Nick Piggin | a74609f | 2006-01-06 00:11:20 -0800 | [diff] [blame] | 3730 | |
| 3731 | failed: |
Mel Gorman | 43c95bc | 2021-06-28 19:41:54 -0700 | [diff] [blame] | 3732 | spin_unlock_irqrestore(&zone->lock, flags); |
Nick Piggin | a74609f | 2006-01-06 00:11:20 -0800 | [diff] [blame] | 3733 | return NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3734 | } |
| 3735 | |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 3736 | #ifdef CONFIG_FAIL_PAGE_ALLOC |
| 3737 | |
Akinobu Mita | b2588c4 | 2011-07-26 16:09:03 -0700 | [diff] [blame] | 3738 | static struct { |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 3739 | struct fault_attr attr; |
| 3740 | |
Viresh Kumar | 621a5f7 | 2015-09-26 15:04:07 -0700 | [diff] [blame] | 3741 | bool ignore_gfp_highmem; |
Mel Gorman | 71baba4 | 2015-11-06 16:28:28 -0800 | [diff] [blame] | 3742 | bool ignore_gfp_reclaim; |
Akinobu Mita | 5411499 | 2007-07-15 23:40:23 -0700 | [diff] [blame] | 3743 | u32 min_order; |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 3744 | } fail_page_alloc = { |
| 3745 | .attr = FAULT_ATTR_INITIALIZER, |
Mel Gorman | 71baba4 | 2015-11-06 16:28:28 -0800 | [diff] [blame] | 3746 | .ignore_gfp_reclaim = true, |
Viresh Kumar | 621a5f7 | 2015-09-26 15:04:07 -0700 | [diff] [blame] | 3747 | .ignore_gfp_highmem = true, |
Akinobu Mita | 5411499 | 2007-07-15 23:40:23 -0700 | [diff] [blame] | 3748 | .min_order = 1, |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 3749 | }; |
| 3750 | |
| 3751 | static int __init setup_fail_page_alloc(char *str) |
| 3752 | { |
| 3753 | return setup_fault_attr(&fail_page_alloc.attr, str); |
| 3754 | } |
| 3755 | __setup("fail_page_alloc=", setup_fail_page_alloc); |
| 3756 | |
Benjamin Poirier | af3b854 | 2018-12-28 00:39:23 -0800 | [diff] [blame] | 3757 | static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 3758 | { |
Akinobu Mita | 5411499 | 2007-07-15 23:40:23 -0700 | [diff] [blame] | 3759 | if (order < fail_page_alloc.min_order) |
Gavin Shan | deaf386 | 2012-07-31 16:41:51 -0700 | [diff] [blame] | 3760 | return false; |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 3761 | if (gfp_mask & __GFP_NOFAIL) |
Gavin Shan | deaf386 | 2012-07-31 16:41:51 -0700 | [diff] [blame] | 3762 | return false; |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 3763 | if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) |
Gavin Shan | deaf386 | 2012-07-31 16:41:51 -0700 | [diff] [blame] | 3764 | return false; |
Mel Gorman | 71baba4 | 2015-11-06 16:28:28 -0800 | [diff] [blame] | 3765 | if (fail_page_alloc.ignore_gfp_reclaim && |
| 3766 | (gfp_mask & __GFP_DIRECT_RECLAIM)) |
Gavin Shan | deaf386 | 2012-07-31 16:41:51 -0700 | [diff] [blame] | 3767 | return false; |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 3768 | |
| 3769 | return should_fail(&fail_page_alloc.attr, 1 << order); |
| 3770 | } |
| 3771 | |
| 3772 | #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS |
| 3773 | |
| 3774 | static int __init fail_page_alloc_debugfs(void) |
| 3775 | { |
Joe Perches | 0825a6f | 2018-06-14 15:27:58 -0700 | [diff] [blame] | 3776 | umode_t mode = S_IFREG | 0600; |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 3777 | struct dentry *dir; |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 3778 | |
Akinobu Mita | dd48c08 | 2011-08-03 16:21:01 -0700 | [diff] [blame] | 3779 | dir = fault_create_debugfs_attr("fail_page_alloc", NULL, |
| 3780 | &fail_page_alloc.attr); |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 3781 | |
Greg Kroah-Hartman | d9f7979 | 2019-03-05 15:46:09 -0800 | [diff] [blame] | 3782 | debugfs_create_bool("ignore-gfp-wait", mode, dir, |
| 3783 | &fail_page_alloc.ignore_gfp_reclaim); |
| 3784 | debugfs_create_bool("ignore-gfp-highmem", mode, dir, |
| 3785 | &fail_page_alloc.ignore_gfp_highmem); |
| 3786 | debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order); |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 3787 | |
Akinobu Mita | b2588c4 | 2011-07-26 16:09:03 -0700 | [diff] [blame] | 3788 | return 0; |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 3789 | } |
| 3790 | |
| 3791 | late_initcall(fail_page_alloc_debugfs); |
| 3792 | |
| 3793 | #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ |
| 3794 | |
| 3795 | #else /* CONFIG_FAIL_PAGE_ALLOC */ |
| 3796 | |
Benjamin Poirier | af3b854 | 2018-12-28 00:39:23 -0800 | [diff] [blame] | 3797 | static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 3798 | { |
Gavin Shan | deaf386 | 2012-07-31 16:41:51 -0700 | [diff] [blame] | 3799 | return false; |
Akinobu Mita | 933e312 | 2006-12-08 02:39:45 -0800 | [diff] [blame] | 3800 | } |
| 3801 | |
| 3802 | #endif /* CONFIG_FAIL_PAGE_ALLOC */ |
| 3803 | |
Matteo Croce | 54aa386 | 2021-07-14 21:26:43 -0700 | [diff] [blame] | 3804 | noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) |
Benjamin Poirier | af3b854 | 2018-12-28 00:39:23 -0800 | [diff] [blame] | 3805 | { |
| 3806 | return __should_fail_alloc_page(gfp_mask, order); |
| 3807 | } |
| 3808 | ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE); |
| 3809 | |
Jaewon Kim | f27ce0e | 2020-08-06 23:25:20 -0700 | [diff] [blame] | 3810 | static inline long __zone_watermark_unusable_free(struct zone *z, |
| 3811 | unsigned int order, unsigned int alloc_flags) |
| 3812 | { |
| 3813 | const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM)); |
| 3814 | long unusable_free = (1 << order) - 1; |
| 3815 | |
| 3816 | /* |
| 3817 | * If the caller does not have rights to ALLOC_HARDER then subtract |
| 3818 | * the high-atomic reserves. This will over-estimate the size of the |
| 3819 | * atomic reserve but it avoids a search. |
| 3820 | */ |
| 3821 | if (likely(!alloc_harder)) |
| 3822 | unusable_free += z->nr_reserved_highatomic; |
| 3823 | |
| 3824 | #ifdef CONFIG_CMA |
| 3825 | /* If allocation can't use CMA areas don't use free CMA pages */ |
| 3826 | if (!(alloc_flags & ALLOC_CMA)) |
| 3827 | unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); |
| 3828 | #endif |
| 3829 | |
| 3830 | return unusable_free; |
| 3831 | } |
| 3832 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3833 | /* |
Mel Gorman | 97a16fc | 2015-11-06 16:28:40 -0800 | [diff] [blame] | 3834 | * Return true if free base pages are above 'mark'. For high-order checks it |
| 3835 | * will return true of the order-0 watermark is reached and there is at least |
| 3836 | * one free page of a suitable size. Checking now avoids taking the zone lock |
| 3837 | * to check in the allocation paths if no pages are free. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3838 | */ |
Michal Hocko | 86a294a | 2016-05-20 16:57:12 -0700 | [diff] [blame] | 3839 | bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 3840 | int highest_zoneidx, unsigned int alloc_flags, |
Michal Hocko | 86a294a | 2016-05-20 16:57:12 -0700 | [diff] [blame] | 3841 | long free_pages) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3842 | { |
Christoph Lameter | d23ad42 | 2007-02-10 01:43:02 -0800 | [diff] [blame] | 3843 | long min = mark; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3844 | int o; |
Michal Hocko | cd04ae1 | 2017-09-06 16:24:50 -0700 | [diff] [blame] | 3845 | const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3846 | |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 3847 | /* free_pages may go negative - that's OK */ |
Jaewon Kim | f27ce0e | 2020-08-06 23:25:20 -0700 | [diff] [blame] | 3848 | free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 3849 | |
Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 3850 | if (alloc_flags & ALLOC_HIGH) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3851 | min -= min / 2; |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 3852 | |
Jaewon Kim | f27ce0e | 2020-08-06 23:25:20 -0700 | [diff] [blame] | 3853 | if (unlikely(alloc_harder)) { |
Michal Hocko | cd04ae1 | 2017-09-06 16:24:50 -0700 | [diff] [blame] | 3854 | /* |
| 3855 | * OOM victims can try even harder than normal ALLOC_HARDER |
| 3856 | * users on the grounds that it's definitely going to be in |
| 3857 | * the exit path shortly and free memory. Any allocation it |
| 3858 | * makes during the free path will be small and short-lived. |
| 3859 | */ |
| 3860 | if (alloc_flags & ALLOC_OOM) |
| 3861 | min -= min / 2; |
| 3862 | else |
| 3863 | min -= min / 4; |
| 3864 | } |
| 3865 | |
Mel Gorman | 97a16fc | 2015-11-06 16:28:40 -0800 | [diff] [blame] | 3866 | /* |
| 3867 | * Check watermarks for an order-0 allocation request. If these |
| 3868 | * are not met, then a high-order request also cannot go ahead |
| 3869 | * even if a suitable page happened to be free. |
| 3870 | */ |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 3871 | if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) |
Mel Gorman | 88f5acf | 2011-01-13 15:45:41 -0800 | [diff] [blame] | 3872 | return false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3873 | |
Mel Gorman | 97a16fc | 2015-11-06 16:28:40 -0800 | [diff] [blame] | 3874 | /* If this is an order-0 request then the watermark is fine */ |
| 3875 | if (!order) |
| 3876 | return true; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3877 | |
Mel Gorman | 97a16fc | 2015-11-06 16:28:40 -0800 | [diff] [blame] | 3878 | /* For a high-order request, check at least one suitable page is free */ |
| 3879 | for (o = order; o < MAX_ORDER; o++) { |
| 3880 | struct free_area *area = &z->free_area[o]; |
| 3881 | int mt; |
| 3882 | |
| 3883 | if (!area->nr_free) |
| 3884 | continue; |
| 3885 | |
Mel Gorman | 97a16fc | 2015-11-06 16:28:40 -0800 | [diff] [blame] | 3886 | for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { |
Dan Williams | b03641a | 2019-05-14 15:41:32 -0700 | [diff] [blame] | 3887 | if (!free_area_empty(area, mt)) |
Mel Gorman | 97a16fc | 2015-11-06 16:28:40 -0800 | [diff] [blame] | 3888 | return true; |
| 3889 | } |
| 3890 | |
| 3891 | #ifdef CONFIG_CMA |
Joonsoo Kim | d883c6c | 2018-05-23 10:18:21 +0900 | [diff] [blame] | 3892 | if ((alloc_flags & ALLOC_CMA) && |
Dan Williams | b03641a | 2019-05-14 15:41:32 -0700 | [diff] [blame] | 3893 | !free_area_empty(area, MIGRATE_CMA)) { |
Mel Gorman | 97a16fc | 2015-11-06 16:28:40 -0800 | [diff] [blame] | 3894 | return true; |
Joonsoo Kim | d883c6c | 2018-05-23 10:18:21 +0900 | [diff] [blame] | 3895 | } |
Mel Gorman | 97a16fc | 2015-11-06 16:28:40 -0800 | [diff] [blame] | 3896 | #endif |
chenqiwu | 76089d0 | 2020-04-01 21:09:50 -0700 | [diff] [blame] | 3897 | if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC)) |
Vlastimil Babka | b050e37 | 2017-11-15 17:38:30 -0800 | [diff] [blame] | 3898 | return true; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3899 | } |
Mel Gorman | 97a16fc | 2015-11-06 16:28:40 -0800 | [diff] [blame] | 3900 | return false; |
Mel Gorman | 88f5acf | 2011-01-13 15:45:41 -0800 | [diff] [blame] | 3901 | } |
| 3902 | |
Mel Gorman | 7aeb09f | 2014-06-04 16:10:21 -0700 | [diff] [blame] | 3903 | bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 3904 | int highest_zoneidx, unsigned int alloc_flags) |
Mel Gorman | 88f5acf | 2011-01-13 15:45:41 -0800 | [diff] [blame] | 3905 | { |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 3906 | return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, |
Mel Gorman | 88f5acf | 2011-01-13 15:45:41 -0800 | [diff] [blame] | 3907 | zone_page_state(z, NR_FREE_PAGES)); |
| 3908 | } |
| 3909 | |
Mel Gorman | 48ee5f3 | 2016-05-19 17:14:07 -0700 | [diff] [blame] | 3910 | static inline bool zone_watermark_fast(struct zone *z, unsigned int order, |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 3911 | unsigned long mark, int highest_zoneidx, |
Charan Teja Reddy | f80b08f | 2020-08-06 23:25:24 -0700 | [diff] [blame] | 3912 | unsigned int alloc_flags, gfp_t gfp_mask) |
Mel Gorman | 48ee5f3 | 2016-05-19 17:14:07 -0700 | [diff] [blame] | 3913 | { |
Jaewon Kim | f27ce0e | 2020-08-06 23:25:20 -0700 | [diff] [blame] | 3914 | long free_pages; |
Joonsoo Kim | d883c6c | 2018-05-23 10:18:21 +0900 | [diff] [blame] | 3915 | |
Jaewon Kim | f27ce0e | 2020-08-06 23:25:20 -0700 | [diff] [blame] | 3916 | free_pages = zone_page_state(z, NR_FREE_PAGES); |
Mel Gorman | 48ee5f3 | 2016-05-19 17:14:07 -0700 | [diff] [blame] | 3917 | |
| 3918 | /* |
| 3919 | * Fast check for order-0 only. If this fails then the reserves |
Jaewon Kim | f27ce0e | 2020-08-06 23:25:20 -0700 | [diff] [blame] | 3920 | * need to be calculated. |
Mel Gorman | 48ee5f3 | 2016-05-19 17:14:07 -0700 | [diff] [blame] | 3921 | */ |
Jaewon Kim | f27ce0e | 2020-08-06 23:25:20 -0700 | [diff] [blame] | 3922 | if (!order) { |
| 3923 | long fast_free; |
| 3924 | |
| 3925 | fast_free = free_pages; |
| 3926 | fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags); |
| 3927 | if (fast_free > mark + z->lowmem_reserve[highest_zoneidx]) |
| 3928 | return true; |
| 3929 | } |
Mel Gorman | 48ee5f3 | 2016-05-19 17:14:07 -0700 | [diff] [blame] | 3930 | |
Charan Teja Reddy | f80b08f | 2020-08-06 23:25:24 -0700 | [diff] [blame] | 3931 | if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, |
| 3932 | free_pages)) |
| 3933 | return true; |
| 3934 | /* |
| 3935 | * Ignore watermark boosting for GFP_ATOMIC order-0 allocations |
| 3936 | * when checking the min watermark. The min watermark is the |
| 3937 | * point where boosting is ignored so that kswapd is woken up |
| 3938 | * when below the low watermark. |
| 3939 | */ |
| 3940 | if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost |
| 3941 | && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { |
| 3942 | mark = z->_watermark[WMARK_MIN]; |
| 3943 | return __zone_watermark_ok(z, order, mark, highest_zoneidx, |
| 3944 | alloc_flags, free_pages); |
| 3945 | } |
| 3946 | |
| 3947 | return false; |
Mel Gorman | 48ee5f3 | 2016-05-19 17:14:07 -0700 | [diff] [blame] | 3948 | } |
| 3949 | |
Mel Gorman | 7aeb09f | 2014-06-04 16:10:21 -0700 | [diff] [blame] | 3950 | bool zone_watermark_ok_safe(struct zone *z, unsigned int order, |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 3951 | unsigned long mark, int highest_zoneidx) |
Mel Gorman | 88f5acf | 2011-01-13 15:45:41 -0800 | [diff] [blame] | 3952 | { |
| 3953 | long free_pages = zone_page_state(z, NR_FREE_PAGES); |
| 3954 | |
| 3955 | if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) |
| 3956 | free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); |
| 3957 | |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 3958 | return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, |
Mel Gorman | 88f5acf | 2011-01-13 15:45:41 -0800 | [diff] [blame] | 3959 | free_pages); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3960 | } |
| 3961 | |
Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 3962 | #ifdef CONFIG_NUMA |
Geert Uytterhoeven | 61bb6cd | 2021-11-05 13:40:24 -0700 | [diff] [blame] | 3963 | int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; |
| 3964 | |
David Rientjes | 957f822 | 2012-10-08 16:33:24 -0700 | [diff] [blame] | 3965 | static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) |
| 3966 | { |
Gavin Shan | e02dc01 | 2017-02-24 14:59:33 -0800 | [diff] [blame] | 3967 | return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= |
Matt Fleming | a55c745 | 2019-08-08 20:53:01 +0100 | [diff] [blame] | 3968 | node_reclaim_distance; |
David Rientjes | 957f822 | 2012-10-08 16:33:24 -0700 | [diff] [blame] | 3969 | } |
Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 3970 | #else /* CONFIG_NUMA */ |
David Rientjes | 957f822 | 2012-10-08 16:33:24 -0700 | [diff] [blame] | 3971 | static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) |
| 3972 | { |
| 3973 | return true; |
| 3974 | } |
Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 3975 | #endif /* CONFIG_NUMA */ |
| 3976 | |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 3977 | /* |
| 3978 | * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid |
| 3979 | * fragmentation is subtle. If the preferred zone was HIGHMEM then |
| 3980 | * premature use of a lower zone may cause lowmem pressure problems that |
| 3981 | * are worse than fragmentation. If the next zone is ZONE_DMA then it is |
| 3982 | * probably too small. It only makes sense to spread allocations to avoid |
| 3983 | * fragmentation between the Normal and DMA32 zones. |
| 3984 | */ |
| 3985 | static inline unsigned int |
Mel Gorman | 0a79cda | 2018-12-28 00:35:48 -0800 | [diff] [blame] | 3986 | alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 3987 | { |
Mateusz Nosek | 736838e | 2020-04-01 21:09:47 -0700 | [diff] [blame] | 3988 | unsigned int alloc_flags; |
Mel Gorman | 0a79cda | 2018-12-28 00:35:48 -0800 | [diff] [blame] | 3989 | |
Mateusz Nosek | 736838e | 2020-04-01 21:09:47 -0700 | [diff] [blame] | 3990 | /* |
| 3991 | * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD |
| 3992 | * to save a branch. |
| 3993 | */ |
| 3994 | alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); |
Mel Gorman | 0a79cda | 2018-12-28 00:35:48 -0800 | [diff] [blame] | 3995 | |
| 3996 | #ifdef CONFIG_ZONE_DMA32 |
Andrey Ryabinin | 8139ad0 | 2019-04-25 22:23:58 -0700 | [diff] [blame] | 3997 | if (!zone) |
| 3998 | return alloc_flags; |
| 3999 | |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 4000 | if (zone_idx(zone) != ZONE_NORMAL) |
Andrey Ryabinin | 8118b82 | 2019-04-25 22:24:01 -0700 | [diff] [blame] | 4001 | return alloc_flags; |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 4002 | |
| 4003 | /* |
| 4004 | * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and |
| 4005 | * the pointer is within zone->zone_pgdat->node_zones[]. Also assume |
| 4006 | * on UMA that if Normal is populated then so is DMA32. |
| 4007 | */ |
| 4008 | BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); |
| 4009 | if (nr_online_nodes > 1 && !populated_zone(--zone)) |
Andrey Ryabinin | 8118b82 | 2019-04-25 22:24:01 -0700 | [diff] [blame] | 4010 | return alloc_flags; |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 4011 | |
Andrey Ryabinin | 8118b82 | 2019-04-25 22:24:01 -0700 | [diff] [blame] | 4012 | alloc_flags |= ALLOC_NOFRAGMENT; |
Mel Gorman | 0a79cda | 2018-12-28 00:35:48 -0800 | [diff] [blame] | 4013 | #endif /* CONFIG_ZONE_DMA32 */ |
| 4014 | return alloc_flags; |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 4015 | } |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 4016 | |
Pavel Tatashin | 8e3560d | 2021-05-04 18:39:00 -0700 | [diff] [blame] | 4017 | /* Must be called after current_gfp_context() which can change gfp_mask */ |
| 4018 | static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, |
| 4019 | unsigned int alloc_flags) |
Joonsoo Kim | 8510e69 | 2020-08-06 23:26:04 -0700 | [diff] [blame] | 4020 | { |
| 4021 | #ifdef CONFIG_CMA |
Pavel Tatashin | 8e3560d | 2021-05-04 18:39:00 -0700 | [diff] [blame] | 4022 | if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) |
Joonsoo Kim | 8510e69 | 2020-08-06 23:26:04 -0700 | [diff] [blame] | 4023 | alloc_flags |= ALLOC_CMA; |
Joonsoo Kim | 8510e69 | 2020-08-06 23:26:04 -0700 | [diff] [blame] | 4024 | #endif |
| 4025 | return alloc_flags; |
| 4026 | } |
| 4027 | |
Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 4028 | /* |
Paul Jackson | 0798e51 | 2006-12-06 20:31:38 -0800 | [diff] [blame] | 4029 | * get_page_from_freelist goes through the zonelist trying to allocate |
Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 4030 | * a page. |
| 4031 | */ |
| 4032 | static struct page * |
Vlastimil Babka | a926375 | 2015-02-11 15:25:41 -0800 | [diff] [blame] | 4033 | get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, |
| 4034 | const struct alloc_context *ac) |
Martin Hicks | 753ee72 | 2005-06-21 17:14:41 -0700 | [diff] [blame] | 4035 | { |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 4036 | struct zoneref *z; |
Mel Gorman | 5117f45 | 2009-06-16 15:31:59 -0700 | [diff] [blame] | 4037 | struct zone *zone; |
Mel Gorman | 3b8c0be | 2016-07-28 15:46:53 -0700 | [diff] [blame] | 4038 | struct pglist_data *last_pgdat_dirty_limit = NULL; |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 4039 | bool no_fallback; |
Mel Gorman | 3b8c0be | 2016-07-28 15:46:53 -0700 | [diff] [blame] | 4040 | |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 4041 | retry: |
Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 4042 | /* |
Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 4043 | * Scan zonelist, looking for a zone with enough free. |
Vladimir Davydov | 344736f | 2014-10-20 15:50:30 +0400 | [diff] [blame] | 4044 | * See also __cpuset_node_allowed() comment in kernel/cpuset.c. |
Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 4045 | */ |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 4046 | no_fallback = alloc_flags & ALLOC_NOFRAGMENT; |
| 4047 | z = ac->preferred_zoneref; |
Mateusz Nosek | 30d8ec7 | 2020-10-13 16:55:57 -0700 | [diff] [blame] | 4048 | for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, |
| 4049 | ac->nodemask) { |
Mel Gorman | be06af0 | 2016-05-19 17:13:47 -0700 | [diff] [blame] | 4050 | struct page *page; |
Johannes Weiner | e085dbc | 2013-09-11 14:20:46 -0700 | [diff] [blame] | 4051 | unsigned long mark; |
| 4052 | |
Mel Gorman | 664eedd | 2014-06-04 16:10:08 -0700 | [diff] [blame] | 4053 | if (cpusets_enabled() && |
| 4054 | (alloc_flags & ALLOC_CPUSET) && |
Vlastimil Babka | 002f290 | 2016-05-19 17:14:30 -0700 | [diff] [blame] | 4055 | !__cpuset_zone_allowed(zone, gfp_mask)) |
Mel Gorman | cd38b11 | 2011-07-25 17:12:29 -0700 | [diff] [blame] | 4056 | continue; |
Johannes Weiner | a756cf5 | 2012-01-10 15:07:49 -0800 | [diff] [blame] | 4057 | /* |
| 4058 | * When allocating a page cache page for writing, we |
Mel Gorman | 281e372 | 2016-07-28 15:46:11 -0700 | [diff] [blame] | 4059 | * want to get it from a node that is within its dirty |
| 4060 | * limit, such that no single node holds more than its |
Johannes Weiner | a756cf5 | 2012-01-10 15:07:49 -0800 | [diff] [blame] | 4061 | * proportional share of globally allowed dirty pages. |
Mel Gorman | 281e372 | 2016-07-28 15:46:11 -0700 | [diff] [blame] | 4062 | * The dirty limits take into account the node's |
Johannes Weiner | a756cf5 | 2012-01-10 15:07:49 -0800 | [diff] [blame] | 4063 | * lowmem reserves and high watermark so that kswapd |
| 4064 | * should be able to balance it without having to |
| 4065 | * write pages from its LRU list. |
| 4066 | * |
Johannes Weiner | a756cf5 | 2012-01-10 15:07:49 -0800 | [diff] [blame] | 4067 | * XXX: For now, allow allocations to potentially |
Mel Gorman | 281e372 | 2016-07-28 15:46:11 -0700 | [diff] [blame] | 4068 | * exceed the per-node dirty limit in the slowpath |
Mel Gorman | c9ab0c4 | 2015-11-06 16:28:12 -0800 | [diff] [blame] | 4069 | * (spread_dirty_pages unset) before going into reclaim, |
Johannes Weiner | a756cf5 | 2012-01-10 15:07:49 -0800 | [diff] [blame] | 4070 | * which is important when on a NUMA setup the allowed |
Mel Gorman | 281e372 | 2016-07-28 15:46:11 -0700 | [diff] [blame] | 4071 | * nodes are together not big enough to reach the |
Johannes Weiner | a756cf5 | 2012-01-10 15:07:49 -0800 | [diff] [blame] | 4072 | * global limit. The proper fix for these situations |
Mel Gorman | 281e372 | 2016-07-28 15:46:11 -0700 | [diff] [blame] | 4073 | * will require awareness of nodes in the |
Johannes Weiner | a756cf5 | 2012-01-10 15:07:49 -0800 | [diff] [blame] | 4074 | * dirty-throttling and the flusher threads. |
| 4075 | */ |
Mel Gorman | 3b8c0be | 2016-07-28 15:46:53 -0700 | [diff] [blame] | 4076 | if (ac->spread_dirty_pages) { |
| 4077 | if (last_pgdat_dirty_limit == zone->zone_pgdat) |
| 4078 | continue; |
| 4079 | |
| 4080 | if (!node_dirty_ok(zone->zone_pgdat)) { |
| 4081 | last_pgdat_dirty_limit = zone->zone_pgdat; |
| 4082 | continue; |
| 4083 | } |
| 4084 | } |
Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 4085 | |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 4086 | if (no_fallback && nr_online_nodes > 1 && |
| 4087 | zone != ac->preferred_zoneref->zone) { |
| 4088 | int local_nid; |
| 4089 | |
| 4090 | /* |
| 4091 | * If moving to a remote node, retry but allow |
| 4092 | * fragmenting fallbacks. Locality is more important |
| 4093 | * than fragmentation avoidance. |
| 4094 | */ |
| 4095 | local_nid = zone_to_nid(ac->preferred_zoneref->zone); |
| 4096 | if (zone_to_nid(zone) != local_nid) { |
| 4097 | alloc_flags &= ~ALLOC_NOFRAGMENT; |
| 4098 | goto retry; |
| 4099 | } |
| 4100 | } |
| 4101 | |
Mel Gorman | a921444 | 2018-12-28 00:35:44 -0800 | [diff] [blame] | 4102 | mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); |
Mel Gorman | 48ee5f3 | 2016-05-19 17:14:07 -0700 | [diff] [blame] | 4103 | if (!zone_watermark_fast(zone, order, mark, |
Charan Teja Reddy | f80b08f | 2020-08-06 23:25:24 -0700 | [diff] [blame] | 4104 | ac->highest_zoneidx, alloc_flags, |
| 4105 | gfp_mask)) { |
Mel Gorman | fa5e084 | 2009-06-16 15:33:22 -0700 | [diff] [blame] | 4106 | int ret; |
| 4107 | |
Pavel Tatashin | c9e97a1 | 2018-04-05 16:22:31 -0700 | [diff] [blame] | 4108 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
| 4109 | /* |
| 4110 | * Watermark failed for this zone, but see if we can |
| 4111 | * grow this zone if it contains deferred pages. |
| 4112 | */ |
| 4113 | if (static_branch_unlikely(&deferred_pages)) { |
| 4114 | if (_deferred_grow_zone(zone, order)) |
| 4115 | goto try_this_zone; |
| 4116 | } |
| 4117 | #endif |
Mel Gorman | 5dab291 | 2014-06-04 16:10:14 -0700 | [diff] [blame] | 4118 | /* Checked here to keep the fast path fast */ |
| 4119 | BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); |
| 4120 | if (alloc_flags & ALLOC_NO_WATERMARKS) |
| 4121 | goto try_this_zone; |
| 4122 | |
Dave Hansen | 202e35d | 2021-05-04 18:36:04 -0700 | [diff] [blame] | 4123 | if (!node_reclaim_enabled() || |
Mel Gorman | c33d6c0 | 2016-05-19 17:14:10 -0700 | [diff] [blame] | 4124 | !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) |
Mel Gorman | cd38b11 | 2011-07-25 17:12:29 -0700 | [diff] [blame] | 4125 | continue; |
| 4126 | |
Mel Gorman | a5f5f91 | 2016-07-28 15:46:32 -0700 | [diff] [blame] | 4127 | ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); |
Mel Gorman | fa5e084 | 2009-06-16 15:33:22 -0700 | [diff] [blame] | 4128 | switch (ret) { |
Mel Gorman | a5f5f91 | 2016-07-28 15:46:32 -0700 | [diff] [blame] | 4129 | case NODE_RECLAIM_NOSCAN: |
Mel Gorman | fa5e084 | 2009-06-16 15:33:22 -0700 | [diff] [blame] | 4130 | /* did not scan */ |
Mel Gorman | cd38b11 | 2011-07-25 17:12:29 -0700 | [diff] [blame] | 4131 | continue; |
Mel Gorman | a5f5f91 | 2016-07-28 15:46:32 -0700 | [diff] [blame] | 4132 | case NODE_RECLAIM_FULL: |
Mel Gorman | fa5e084 | 2009-06-16 15:33:22 -0700 | [diff] [blame] | 4133 | /* scanned but unreclaimable */ |
Mel Gorman | cd38b11 | 2011-07-25 17:12:29 -0700 | [diff] [blame] | 4134 | continue; |
Mel Gorman | fa5e084 | 2009-06-16 15:33:22 -0700 | [diff] [blame] | 4135 | default: |
| 4136 | /* did we reclaim enough */ |
Mel Gorman | fed2719 | 2013-04-29 15:07:57 -0700 | [diff] [blame] | 4137 | if (zone_watermark_ok(zone, order, mark, |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 4138 | ac->highest_zoneidx, alloc_flags)) |
Mel Gorman | fed2719 | 2013-04-29 15:07:57 -0700 | [diff] [blame] | 4139 | goto try_this_zone; |
| 4140 | |
Mel Gorman | fed2719 | 2013-04-29 15:07:57 -0700 | [diff] [blame] | 4141 | continue; |
Paul Jackson | 0798e51 | 2006-12-06 20:31:38 -0800 | [diff] [blame] | 4142 | } |
Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 4143 | } |
| 4144 | |
Mel Gorman | fa5e084 | 2009-06-16 15:33:22 -0700 | [diff] [blame] | 4145 | try_this_zone: |
Mel Gorman | 066b239 | 2017-02-24 14:56:26 -0800 | [diff] [blame] | 4146 | page = rmqueue(ac->preferred_zoneref->zone, zone, order, |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 4147 | gfp_mask, alloc_flags, ac->migratetype); |
Vlastimil Babka | 7537919 | 2015-02-11 15:25:38 -0800 | [diff] [blame] | 4148 | if (page) { |
Mel Gorman | 479f854 | 2016-05-19 17:14:35 -0700 | [diff] [blame] | 4149 | prep_new_page(page, order, gfp_mask, alloc_flags); |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 4150 | |
| 4151 | /* |
| 4152 | * If this is a high-order atomic allocation then check |
| 4153 | * if the pageblock should be reserved for the future |
| 4154 | */ |
| 4155 | if (unlikely(order && (alloc_flags & ALLOC_HARDER))) |
| 4156 | reserve_highatomic_pageblock(page, zone, order); |
| 4157 | |
Vlastimil Babka | 7537919 | 2015-02-11 15:25:38 -0800 | [diff] [blame] | 4158 | return page; |
Pavel Tatashin | c9e97a1 | 2018-04-05 16:22:31 -0700 | [diff] [blame] | 4159 | } else { |
| 4160 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
| 4161 | /* Try again if zone has deferred pages */ |
| 4162 | if (static_branch_unlikely(&deferred_pages)) { |
| 4163 | if (_deferred_grow_zone(zone, order)) |
| 4164 | goto try_this_zone; |
| 4165 | } |
| 4166 | #endif |
Vlastimil Babka | 7537919 | 2015-02-11 15:25:38 -0800 | [diff] [blame] | 4167 | } |
Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 4168 | } |
Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 4169 | |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 4170 | /* |
| 4171 | * It's possible on a UMA machine to get through all zones that are |
| 4172 | * fragmented. If avoiding fragmentation, reset and try again. |
| 4173 | */ |
| 4174 | if (no_fallback) { |
| 4175 | alloc_flags &= ~ALLOC_NOFRAGMENT; |
| 4176 | goto retry; |
| 4177 | } |
| 4178 | |
Mel Gorman | 4ffeaf3 | 2014-08-06 16:07:22 -0700 | [diff] [blame] | 4179 | return NULL; |
Martin Hicks | 753ee72 | 2005-06-21 17:14:41 -0700 | [diff] [blame] | 4180 | } |
| 4181 | |
Michal Hocko | 9af744d | 2017-02-22 15:46:16 -0800 | [diff] [blame] | 4182 | static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) |
Dave Hansen | a238ab5 | 2011-05-24 17:12:16 -0700 | [diff] [blame] | 4183 | { |
Dave Hansen | a238ab5 | 2011-05-24 17:12:16 -0700 | [diff] [blame] | 4184 | unsigned int filter = SHOW_MEM_FILTER_NODES; |
Dave Hansen | a238ab5 | 2011-05-24 17:12:16 -0700 | [diff] [blame] | 4185 | |
| 4186 | /* |
| 4187 | * This documents exceptions given to allocations in certain |
| 4188 | * contexts that are allowed to allocate outside current's set |
| 4189 | * of allowed nodes. |
| 4190 | */ |
| 4191 | if (!(gfp_mask & __GFP_NOMEMALLOC)) |
Michal Hocko | cd04ae1 | 2017-09-06 16:24:50 -0700 | [diff] [blame] | 4192 | if (tsk_is_oom_victim(current) || |
Dave Hansen | a238ab5 | 2011-05-24 17:12:16 -0700 | [diff] [blame] | 4193 | (current->flags & (PF_MEMALLOC | PF_EXITING))) |
| 4194 | filter &= ~SHOW_MEM_FILTER_NODES; |
Vasily Averin | 88dc6f20 | 2021-09-02 14:58:13 -0700 | [diff] [blame] | 4195 | if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) |
Dave Hansen | a238ab5 | 2011-05-24 17:12:16 -0700 | [diff] [blame] | 4196 | filter &= ~SHOW_MEM_FILTER_NODES; |
| 4197 | |
Michal Hocko | 9af744d | 2017-02-22 15:46:16 -0800 | [diff] [blame] | 4198 | show_mem(filter, nodemask); |
Michal Hocko | aa18750 | 2017-02-22 15:41:45 -0800 | [diff] [blame] | 4199 | } |
| 4200 | |
Michal Hocko | a8e9925 | 2017-02-22 15:46:10 -0800 | [diff] [blame] | 4201 | void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) |
Michal Hocko | aa18750 | 2017-02-22 15:41:45 -0800 | [diff] [blame] | 4202 | { |
| 4203 | struct va_format vaf; |
| 4204 | va_list args; |
Johannes Weiner | 1be334e | 2019-11-05 21:16:51 -0800 | [diff] [blame] | 4205 | static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); |
Michal Hocko | aa18750 | 2017-02-22 15:41:45 -0800 | [diff] [blame] | 4206 | |
Tetsuo Handa | 0f7896f | 2017-05-03 14:55:34 -0700 | [diff] [blame] | 4207 | if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs)) |
Michal Hocko | aa18750 | 2017-02-22 15:41:45 -0800 | [diff] [blame] | 4208 | return; |
| 4209 | |
Michal Hocko | 7877cdc | 2016-10-07 17:01:55 -0700 | [diff] [blame] | 4210 | va_start(args, fmt); |
| 4211 | vaf.fmt = fmt; |
| 4212 | vaf.va = &args; |
yuzhoujian | ef8444e | 2018-12-28 00:36:07 -0800 | [diff] [blame] | 4213 | pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", |
Michal Hocko | 0205f75 | 2017-11-15 17:39:14 -0800 | [diff] [blame] | 4214 | current->comm, &vaf, gfp_mask, &gfp_mask, |
| 4215 | nodemask_pr_args(nodemask)); |
Michal Hocko | 7877cdc | 2016-10-07 17:01:55 -0700 | [diff] [blame] | 4216 | va_end(args); |
Joe Perches | 3ee9a4f | 2011-10-31 17:08:35 -0700 | [diff] [blame] | 4217 | |
Michal Hocko | a8e9925 | 2017-02-22 15:46:10 -0800 | [diff] [blame] | 4218 | cpuset_print_current_mems_allowed(); |
yuzhoujian | ef8444e | 2018-12-28 00:36:07 -0800 | [diff] [blame] | 4219 | pr_cont("\n"); |
Dave Hansen | a238ab5 | 2011-05-24 17:12:16 -0700 | [diff] [blame] | 4220 | dump_stack(); |
David Rientjes | 685dbf6 | 2017-02-22 15:46:28 -0800 | [diff] [blame] | 4221 | warn_alloc_show_mem(gfp_mask, nodemask); |
Dave Hansen | a238ab5 | 2011-05-24 17:12:16 -0700 | [diff] [blame] | 4222 | } |
| 4223 | |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4224 | static inline struct page * |
Michal Hocko | 6c18ba7 | 2017-02-22 15:46:25 -0800 | [diff] [blame] | 4225 | __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, |
| 4226 | unsigned int alloc_flags, |
| 4227 | const struct alloc_context *ac) |
| 4228 | { |
| 4229 | struct page *page; |
| 4230 | |
| 4231 | page = get_page_from_freelist(gfp_mask, order, |
| 4232 | alloc_flags|ALLOC_CPUSET, ac); |
| 4233 | /* |
| 4234 | * fallback to ignore cpuset restriction if our nodes |
| 4235 | * are depleted |
| 4236 | */ |
| 4237 | if (!page) |
| 4238 | page = get_page_from_freelist(gfp_mask, order, |
| 4239 | alloc_flags, ac); |
| 4240 | |
| 4241 | return page; |
| 4242 | } |
| 4243 | |
| 4244 | static inline struct page * |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4245 | __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, |
Vlastimil Babka | a926375 | 2015-02-11 15:25:41 -0800 | [diff] [blame] | 4246 | const struct alloc_context *ac, unsigned long *did_some_progress) |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4247 | { |
David Rientjes | 6e0fc46 | 2015-09-08 15:00:36 -0700 | [diff] [blame] | 4248 | struct oom_control oc = { |
| 4249 | .zonelist = ac->zonelist, |
| 4250 | .nodemask = ac->nodemask, |
Vladimir Davydov | 2a966b7 | 2016-07-26 15:22:33 -0700 | [diff] [blame] | 4251 | .memcg = NULL, |
David Rientjes | 6e0fc46 | 2015-09-08 15:00:36 -0700 | [diff] [blame] | 4252 | .gfp_mask = gfp_mask, |
| 4253 | .order = order, |
David Rientjes | 6e0fc46 | 2015-09-08 15:00:36 -0700 | [diff] [blame] | 4254 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4255 | struct page *page; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4256 | |
Johannes Weiner | 9879de7 | 2015-01-26 12:58:32 -0800 | [diff] [blame] | 4257 | *did_some_progress = 0; |
| 4258 | |
Johannes Weiner | 9879de7 | 2015-01-26 12:58:32 -0800 | [diff] [blame] | 4259 | /* |
Johannes Weiner | dc56401 | 2015-06-24 16:57:19 -0700 | [diff] [blame] | 4260 | * Acquire the oom lock. If that fails, somebody else is |
| 4261 | * making progress for us. |
Johannes Weiner | 9879de7 | 2015-01-26 12:58:32 -0800 | [diff] [blame] | 4262 | */ |
Johannes Weiner | dc56401 | 2015-06-24 16:57:19 -0700 | [diff] [blame] | 4263 | if (!mutex_trylock(&oom_lock)) { |
Johannes Weiner | 9879de7 | 2015-01-26 12:58:32 -0800 | [diff] [blame] | 4264 | *did_some_progress = 1; |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4265 | schedule_timeout_uninterruptible(1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4266 | return NULL; |
| 4267 | } |
Jens Axboe | 6b1de91 | 2005-11-17 21:35:02 +0100 | [diff] [blame] | 4268 | |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4269 | /* |
| 4270 | * Go through the zonelist yet one more time, keep very high watermark |
| 4271 | * here, this is only to catch a parallel oom killing, we must fail if |
Tetsuo Handa | e746bf7 | 2017-08-31 16:15:20 -0700 | [diff] [blame] | 4272 | * we're still under heavy pressure. But make sure that this reclaim |
| 4273 | * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY |
| 4274 | * allocation which will never fail due to oom_lock already held. |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4275 | */ |
Tetsuo Handa | e746bf7 | 2017-08-31 16:15:20 -0700 | [diff] [blame] | 4276 | page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & |
| 4277 | ~__GFP_DIRECT_RECLAIM, order, |
| 4278 | ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); |
Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 4279 | if (page) |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4280 | goto out; |
| 4281 | |
Michal Hocko | 06ad276 | 2017-02-22 15:46:22 -0800 | [diff] [blame] | 4282 | /* Coredumps can quickly deplete all memory reserves */ |
| 4283 | if (current->flags & PF_DUMPCORE) |
| 4284 | goto out; |
| 4285 | /* The OOM killer will not help higher order allocs */ |
| 4286 | if (order > PAGE_ALLOC_COSTLY_ORDER) |
| 4287 | goto out; |
Michal Hocko | dcda9b0 | 2017-07-12 14:36:45 -0700 | [diff] [blame] | 4288 | /* |
| 4289 | * We have already exhausted all our reclaim opportunities without any |
| 4290 | * success so it is time to admit defeat. We will skip the OOM killer |
| 4291 | * because it is very likely that the caller has a more reasonable |
| 4292 | * fallback than shooting a random task. |
Mateusz Nosek | cfb4a54 | 2020-10-13 16:55:45 -0700 | [diff] [blame] | 4293 | * |
| 4294 | * The OOM killer may not free memory on a specific node. |
Michal Hocko | dcda9b0 | 2017-07-12 14:36:45 -0700 | [diff] [blame] | 4295 | */ |
Mateusz Nosek | cfb4a54 | 2020-10-13 16:55:45 -0700 | [diff] [blame] | 4296 | if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) |
Michal Hocko | dcda9b0 | 2017-07-12 14:36:45 -0700 | [diff] [blame] | 4297 | goto out; |
Michal Hocko | 06ad276 | 2017-02-22 15:46:22 -0800 | [diff] [blame] | 4298 | /* The OOM killer does not needlessly kill tasks for lowmem */ |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 4299 | if (ac->highest_zoneidx < ZONE_NORMAL) |
Michal Hocko | 06ad276 | 2017-02-22 15:46:22 -0800 | [diff] [blame] | 4300 | goto out; |
| 4301 | if (pm_suspended_storage()) |
| 4302 | goto out; |
| 4303 | /* |
| 4304 | * XXX: GFP_NOFS allocations should rather fail than rely on |
| 4305 | * other request to make a forward progress. |
| 4306 | * We are in an unfortunate situation where out_of_memory cannot |
| 4307 | * do much for this context but let's try it to at least get |
| 4308 | * access to memory reserved if the current task is killed (see |
| 4309 | * out_of_memory). Once filesystems are ready to handle allocation |
| 4310 | * failures more gracefully we should just bail out here. |
| 4311 | */ |
Michal Hocko | 3da88fb3 | 2016-05-19 17:13:09 -0700 | [diff] [blame] | 4312 | |
Shile Zhang | 3c2c648 | 2018-01-31 16:20:07 -0800 | [diff] [blame] | 4313 | /* Exhausted what can be done so it's blame time */ |
Michal Hocko | 5020e28 | 2016-01-14 15:20:36 -0800 | [diff] [blame] | 4314 | if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) { |
Michal Hocko | c32b3cb | 2015-02-11 15:26:24 -0800 | [diff] [blame] | 4315 | *did_some_progress = 1; |
Michal Hocko | 5020e28 | 2016-01-14 15:20:36 -0800 | [diff] [blame] | 4316 | |
Michal Hocko | 6c18ba7 | 2017-02-22 15:46:25 -0800 | [diff] [blame] | 4317 | /* |
| 4318 | * Help non-failing allocations by giving them access to memory |
| 4319 | * reserves |
| 4320 | */ |
| 4321 | if (gfp_mask & __GFP_NOFAIL) |
| 4322 | page = __alloc_pages_cpuset_fallback(gfp_mask, order, |
Michal Hocko | 5020e28 | 2016-01-14 15:20:36 -0800 | [diff] [blame] | 4323 | ALLOC_NO_WATERMARKS, ac); |
Michal Hocko | 5020e28 | 2016-01-14 15:20:36 -0800 | [diff] [blame] | 4324 | } |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4325 | out: |
Johannes Weiner | dc56401 | 2015-06-24 16:57:19 -0700 | [diff] [blame] | 4326 | mutex_unlock(&oom_lock); |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4327 | return page; |
| 4328 | } |
| 4329 | |
Michal Hocko | 33c2d21 | 2016-05-20 16:57:06 -0700 | [diff] [blame] | 4330 | /* |
Lu Jialin | baf2f90 | 2021-05-06 18:06:50 -0700 | [diff] [blame] | 4331 | * Maximum number of compaction retries with a progress before OOM |
Michal Hocko | 33c2d21 | 2016-05-20 16:57:06 -0700 | [diff] [blame] | 4332 | * killer is consider as the only way to move forward. |
| 4333 | */ |
| 4334 | #define MAX_COMPACT_RETRIES 16 |
| 4335 | |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 4336 | #ifdef CONFIG_COMPACTION |
| 4337 | /* Try memory compaction for high-order allocations before reclaim */ |
| 4338 | static struct page * |
| 4339 | __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, |
Mel Gorman | c603844 | 2016-05-19 17:13:38 -0700 | [diff] [blame] | 4340 | unsigned int alloc_flags, const struct alloc_context *ac, |
Vlastimil Babka | a5508cd | 2016-07-28 15:49:28 -0700 | [diff] [blame] | 4341 | enum compact_priority prio, enum compact_result *compact_result) |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 4342 | { |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 4343 | struct page *page = NULL; |
Johannes Weiner | eb41468 | 2018-10-26 15:06:27 -0700 | [diff] [blame] | 4344 | unsigned long pflags; |
Vlastimil Babka | 499118e | 2017-05-08 15:59:50 -0700 | [diff] [blame] | 4345 | unsigned int noreclaim_flag; |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 4346 | |
Mel Gorman | 6619971 | 2012-01-12 17:19:41 -0800 | [diff] [blame] | 4347 | if (!order) |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 4348 | return NULL; |
| 4349 | |
Johannes Weiner | eb41468 | 2018-10-26 15:06:27 -0700 | [diff] [blame] | 4350 | psi_memstall_enter(&pflags); |
Vlastimil Babka | 499118e | 2017-05-08 15:59:50 -0700 | [diff] [blame] | 4351 | noreclaim_flag = memalloc_noreclaim_save(); |
Johannes Weiner | eb41468 | 2018-10-26 15:06:27 -0700 | [diff] [blame] | 4352 | |
Michal Hocko | c5d01d0 | 2016-05-20 16:56:53 -0700 | [diff] [blame] | 4353 | *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 4354 | prio, &page); |
Johannes Weiner | eb41468 | 2018-10-26 15:06:27 -0700 | [diff] [blame] | 4355 | |
Vlastimil Babka | 499118e | 2017-05-08 15:59:50 -0700 | [diff] [blame] | 4356 | memalloc_noreclaim_restore(noreclaim_flag); |
Johannes Weiner | eb41468 | 2018-10-26 15:06:27 -0700 | [diff] [blame] | 4357 | psi_memstall_leave(&pflags); |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 4358 | |
Charan Teja Reddy | 06dac2f | 2021-05-04 18:36:51 -0700 | [diff] [blame] | 4359 | if (*compact_result == COMPACT_SKIPPED) |
| 4360 | return NULL; |
Vlastimil Babka | 98dd3b4 | 2014-10-09 15:27:04 -0700 | [diff] [blame] | 4361 | /* |
| 4362 | * At least in one zone compaction wasn't deferred or skipped, so let's |
| 4363 | * count a compaction stall |
| 4364 | */ |
| 4365 | count_vm_event(COMPACTSTALL); |
| 4366 | |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 4367 | /* Prep a captured page if available */ |
| 4368 | if (page) |
| 4369 | prep_new_page(page, order, gfp_mask, alloc_flags); |
| 4370 | |
| 4371 | /* Try get a page from the freelist if available */ |
| 4372 | if (!page) |
| 4373 | page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); |
Vlastimil Babka | 98dd3b4 | 2014-10-09 15:27:04 -0700 | [diff] [blame] | 4374 | |
| 4375 | if (page) { |
| 4376 | struct zone *zone = page_zone(page); |
| 4377 | |
| 4378 | zone->compact_blockskip_flush = false; |
| 4379 | compaction_defer_reset(zone, order, true); |
| 4380 | count_vm_event(COMPACTSUCCESS); |
| 4381 | return page; |
| 4382 | } |
| 4383 | |
| 4384 | /* |
Vlastimil Babka | 98dd3b4 | 2014-10-09 15:27:04 -0700 | [diff] [blame] | 4385 | * It's bad if compaction run occurs and fails. The most likely reason |
| 4386 | * is that pages exist, but not enough to satisfy watermarks. |
| 4387 | */ |
| 4388 | count_vm_event(COMPACTFAIL); |
| 4389 | |
| 4390 | cond_resched(); |
| 4391 | |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 4392 | return NULL; |
| 4393 | } |
Michal Hocko | 33c2d21 | 2016-05-20 16:57:06 -0700 | [diff] [blame] | 4394 | |
Vlastimil Babka | 3250845 | 2016-10-07 17:00:28 -0700 | [diff] [blame] | 4395 | static inline bool |
| 4396 | should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, |
| 4397 | enum compact_result compact_result, |
| 4398 | enum compact_priority *compact_priority, |
Vlastimil Babka | d943649 | 2016-10-07 17:00:31 -0700 | [diff] [blame] | 4399 | int *compaction_retries) |
Vlastimil Babka | 3250845 | 2016-10-07 17:00:28 -0700 | [diff] [blame] | 4400 | { |
| 4401 | int max_retries = MAX_COMPACT_RETRIES; |
Vlastimil Babka | c2033b0 | 2016-10-07 17:00:34 -0700 | [diff] [blame] | 4402 | int min_priority; |
Michal Hocko | 65190cf | 2017-02-22 15:42:03 -0800 | [diff] [blame] | 4403 | bool ret = false; |
| 4404 | int retries = *compaction_retries; |
| 4405 | enum compact_priority priority = *compact_priority; |
Vlastimil Babka | 3250845 | 2016-10-07 17:00:28 -0700 | [diff] [blame] | 4406 | |
| 4407 | if (!order) |
| 4408 | return false; |
| 4409 | |
Aaron Tomlin | 691d949 | 2021-06-28 19:41:10 -0700 | [diff] [blame] | 4410 | if (fatal_signal_pending(current)) |
| 4411 | return false; |
| 4412 | |
Vlastimil Babka | d943649 | 2016-10-07 17:00:31 -0700 | [diff] [blame] | 4413 | if (compaction_made_progress(compact_result)) |
| 4414 | (*compaction_retries)++; |
| 4415 | |
Vlastimil Babka | 3250845 | 2016-10-07 17:00:28 -0700 | [diff] [blame] | 4416 | /* |
| 4417 | * compaction considers all the zone as desperately out of memory |
| 4418 | * so it doesn't really make much sense to retry except when the |
| 4419 | * failure could be caused by insufficient priority |
| 4420 | */ |
Vlastimil Babka | d943649 | 2016-10-07 17:00:31 -0700 | [diff] [blame] | 4421 | if (compaction_failed(compact_result)) |
| 4422 | goto check_priority; |
Vlastimil Babka | 3250845 | 2016-10-07 17:00:28 -0700 | [diff] [blame] | 4423 | |
| 4424 | /* |
Vlastimil Babka | 49433085 | 2019-09-23 15:37:32 -0700 | [diff] [blame] | 4425 | * compaction was skipped because there are not enough order-0 pages |
| 4426 | * to work with, so we retry only if it looks like reclaim can help. |
Vlastimil Babka | 3250845 | 2016-10-07 17:00:28 -0700 | [diff] [blame] | 4427 | */ |
Vlastimil Babka | 49433085 | 2019-09-23 15:37:32 -0700 | [diff] [blame] | 4428 | if (compaction_needs_reclaim(compact_result)) { |
Michal Hocko | 65190cf | 2017-02-22 15:42:03 -0800 | [diff] [blame] | 4429 | ret = compaction_zonelist_suitable(ac, order, alloc_flags); |
| 4430 | goto out; |
| 4431 | } |
Vlastimil Babka | 3250845 | 2016-10-07 17:00:28 -0700 | [diff] [blame] | 4432 | |
| 4433 | /* |
Vlastimil Babka | 49433085 | 2019-09-23 15:37:32 -0700 | [diff] [blame] | 4434 | * make sure the compaction wasn't deferred or didn't bail out early |
| 4435 | * due to locks contention before we declare that we should give up. |
| 4436 | * But the next retry should use a higher priority if allowed, so |
| 4437 | * we don't just keep bailing out endlessly. |
| 4438 | */ |
| 4439 | if (compaction_withdrawn(compact_result)) { |
| 4440 | goto check_priority; |
| 4441 | } |
| 4442 | |
| 4443 | /* |
Michal Hocko | dcda9b0 | 2017-07-12 14:36:45 -0700 | [diff] [blame] | 4444 | * !costly requests are much more important than __GFP_RETRY_MAYFAIL |
Vlastimil Babka | 3250845 | 2016-10-07 17:00:28 -0700 | [diff] [blame] | 4445 | * costly ones because they are de facto nofail and invoke OOM |
| 4446 | * killer to move on while costly can fail and users are ready |
| 4447 | * to cope with that. 1/4 retries is rather arbitrary but we |
| 4448 | * would need much more detailed feedback from compaction to |
| 4449 | * make a better decision. |
| 4450 | */ |
| 4451 | if (order > PAGE_ALLOC_COSTLY_ORDER) |
| 4452 | max_retries /= 4; |
Michal Hocko | 65190cf | 2017-02-22 15:42:03 -0800 | [diff] [blame] | 4453 | if (*compaction_retries <= max_retries) { |
| 4454 | ret = true; |
| 4455 | goto out; |
| 4456 | } |
Vlastimil Babka | 3250845 | 2016-10-07 17:00:28 -0700 | [diff] [blame] | 4457 | |
Vlastimil Babka | d943649 | 2016-10-07 17:00:31 -0700 | [diff] [blame] | 4458 | /* |
| 4459 | * Make sure there are attempts at the highest priority if we exhausted |
| 4460 | * all retries or failed at the lower priorities. |
| 4461 | */ |
| 4462 | check_priority: |
Vlastimil Babka | c2033b0 | 2016-10-07 17:00:34 -0700 | [diff] [blame] | 4463 | min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? |
| 4464 | MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; |
Michal Hocko | 65190cf | 2017-02-22 15:42:03 -0800 | [diff] [blame] | 4465 | |
Vlastimil Babka | c2033b0 | 2016-10-07 17:00:34 -0700 | [diff] [blame] | 4466 | if (*compact_priority > min_priority) { |
Vlastimil Babka | d943649 | 2016-10-07 17:00:31 -0700 | [diff] [blame] | 4467 | (*compact_priority)--; |
| 4468 | *compaction_retries = 0; |
Michal Hocko | 65190cf | 2017-02-22 15:42:03 -0800 | [diff] [blame] | 4469 | ret = true; |
Vlastimil Babka | d943649 | 2016-10-07 17:00:31 -0700 | [diff] [blame] | 4470 | } |
Michal Hocko | 65190cf | 2017-02-22 15:42:03 -0800 | [diff] [blame] | 4471 | out: |
| 4472 | trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); |
| 4473 | return ret; |
Vlastimil Babka | 3250845 | 2016-10-07 17:00:28 -0700 | [diff] [blame] | 4474 | } |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 4475 | #else |
| 4476 | static inline struct page * |
| 4477 | __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, |
Mel Gorman | c603844 | 2016-05-19 17:13:38 -0700 | [diff] [blame] | 4478 | unsigned int alloc_flags, const struct alloc_context *ac, |
Vlastimil Babka | a5508cd | 2016-07-28 15:49:28 -0700 | [diff] [blame] | 4479 | enum compact_priority prio, enum compact_result *compact_result) |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 4480 | { |
Michal Hocko | 33c2d21 | 2016-05-20 16:57:06 -0700 | [diff] [blame] | 4481 | *compact_result = COMPACT_SKIPPED; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 4482 | return NULL; |
| 4483 | } |
Michal Hocko | 33c2d21 | 2016-05-20 16:57:06 -0700 | [diff] [blame] | 4484 | |
| 4485 | static inline bool |
Michal Hocko | 86a294a | 2016-05-20 16:57:12 -0700 | [diff] [blame] | 4486 | should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, |
| 4487 | enum compact_result compact_result, |
Vlastimil Babka | a5508cd | 2016-07-28 15:49:28 -0700 | [diff] [blame] | 4488 | enum compact_priority *compact_priority, |
Vlastimil Babka | d943649 | 2016-10-07 17:00:31 -0700 | [diff] [blame] | 4489 | int *compaction_retries) |
Michal Hocko | 33c2d21 | 2016-05-20 16:57:06 -0700 | [diff] [blame] | 4490 | { |
Michal Hocko | 31e49bf | 2016-05-20 16:57:15 -0700 | [diff] [blame] | 4491 | struct zone *zone; |
| 4492 | struct zoneref *z; |
| 4493 | |
| 4494 | if (!order || order > PAGE_ALLOC_COSTLY_ORDER) |
| 4495 | return false; |
| 4496 | |
| 4497 | /* |
| 4498 | * There are setups with compaction disabled which would prefer to loop |
| 4499 | * inside the allocator rather than hit the oom killer prematurely. |
| 4500 | * Let's give them a good hope and keep retrying while the order-0 |
| 4501 | * watermarks are OK. |
| 4502 | */ |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 4503 | for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, |
| 4504 | ac->highest_zoneidx, ac->nodemask) { |
Michal Hocko | 31e49bf | 2016-05-20 16:57:15 -0700 | [diff] [blame] | 4505 | if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 4506 | ac->highest_zoneidx, alloc_flags)) |
Michal Hocko | 31e49bf | 2016-05-20 16:57:15 -0700 | [diff] [blame] | 4507 | return true; |
| 4508 | } |
Michal Hocko | 33c2d21 | 2016-05-20 16:57:06 -0700 | [diff] [blame] | 4509 | return false; |
| 4510 | } |
Vlastimil Babka | 3250845 | 2016-10-07 17:00:28 -0700 | [diff] [blame] | 4511 | #endif /* CONFIG_COMPACTION */ |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 4512 | |
Peter Zijlstra | d92a8cf | 2017-03-03 10:13:38 +0100 | [diff] [blame] | 4513 | #ifdef CONFIG_LOCKDEP |
Omar Sandoval | 9378132 | 2018-06-07 17:07:02 -0700 | [diff] [blame] | 4514 | static struct lockdep_map __fs_reclaim_map = |
Peter Zijlstra | d92a8cf | 2017-03-03 10:13:38 +0100 | [diff] [blame] | 4515 | STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); |
| 4516 | |
Daniel Vetter | f920e41 | 2020-12-14 19:08:30 -0800 | [diff] [blame] | 4517 | static bool __need_reclaim(gfp_t gfp_mask) |
Peter Zijlstra | d92a8cf | 2017-03-03 10:13:38 +0100 | [diff] [blame] | 4518 | { |
Peter Zijlstra | d92a8cf | 2017-03-03 10:13:38 +0100 | [diff] [blame] | 4519 | /* no reclaim without waiting on it */ |
| 4520 | if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) |
| 4521 | return false; |
| 4522 | |
| 4523 | /* this guy won't enter reclaim */ |
Tetsuo Handa | 2e517d68 | 2018-03-22 16:17:10 -0700 | [diff] [blame] | 4524 | if (current->flags & PF_MEMALLOC) |
Peter Zijlstra | d92a8cf | 2017-03-03 10:13:38 +0100 | [diff] [blame] | 4525 | return false; |
| 4526 | |
Peter Zijlstra | d92a8cf | 2017-03-03 10:13:38 +0100 | [diff] [blame] | 4527 | if (gfp_mask & __GFP_NOLOCKDEP) |
| 4528 | return false; |
| 4529 | |
| 4530 | return true; |
| 4531 | } |
| 4532 | |
Matthew Wilcox (Oracle) | 4f3eaf4 | 2021-09-02 14:52:58 -0700 | [diff] [blame] | 4533 | void __fs_reclaim_acquire(unsigned long ip) |
Omar Sandoval | 9378132 | 2018-06-07 17:07:02 -0700 | [diff] [blame] | 4534 | { |
Matthew Wilcox (Oracle) | 4f3eaf4 | 2021-09-02 14:52:58 -0700 | [diff] [blame] | 4535 | lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); |
Omar Sandoval | 9378132 | 2018-06-07 17:07:02 -0700 | [diff] [blame] | 4536 | } |
| 4537 | |
Matthew Wilcox (Oracle) | 4f3eaf4 | 2021-09-02 14:52:58 -0700 | [diff] [blame] | 4538 | void __fs_reclaim_release(unsigned long ip) |
Omar Sandoval | 9378132 | 2018-06-07 17:07:02 -0700 | [diff] [blame] | 4539 | { |
Matthew Wilcox (Oracle) | 4f3eaf4 | 2021-09-02 14:52:58 -0700 | [diff] [blame] | 4540 | lock_release(&__fs_reclaim_map, ip); |
Omar Sandoval | 9378132 | 2018-06-07 17:07:02 -0700 | [diff] [blame] | 4541 | } |
| 4542 | |
Peter Zijlstra | d92a8cf | 2017-03-03 10:13:38 +0100 | [diff] [blame] | 4543 | void fs_reclaim_acquire(gfp_t gfp_mask) |
| 4544 | { |
Daniel Vetter | f920e41 | 2020-12-14 19:08:30 -0800 | [diff] [blame] | 4545 | gfp_mask = current_gfp_context(gfp_mask); |
| 4546 | |
| 4547 | if (__need_reclaim(gfp_mask)) { |
| 4548 | if (gfp_mask & __GFP_FS) |
Matthew Wilcox (Oracle) | 4f3eaf4 | 2021-09-02 14:52:58 -0700 | [diff] [blame] | 4549 | __fs_reclaim_acquire(_RET_IP_); |
Daniel Vetter | f920e41 | 2020-12-14 19:08:30 -0800 | [diff] [blame] | 4550 | |
| 4551 | #ifdef CONFIG_MMU_NOTIFIER |
| 4552 | lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); |
| 4553 | lock_map_release(&__mmu_notifier_invalidate_range_start_map); |
| 4554 | #endif |
| 4555 | |
| 4556 | } |
Peter Zijlstra | d92a8cf | 2017-03-03 10:13:38 +0100 | [diff] [blame] | 4557 | } |
| 4558 | EXPORT_SYMBOL_GPL(fs_reclaim_acquire); |
| 4559 | |
| 4560 | void fs_reclaim_release(gfp_t gfp_mask) |
| 4561 | { |
Daniel Vetter | f920e41 | 2020-12-14 19:08:30 -0800 | [diff] [blame] | 4562 | gfp_mask = current_gfp_context(gfp_mask); |
| 4563 | |
| 4564 | if (__need_reclaim(gfp_mask)) { |
| 4565 | if (gfp_mask & __GFP_FS) |
Matthew Wilcox (Oracle) | 4f3eaf4 | 2021-09-02 14:52:58 -0700 | [diff] [blame] | 4566 | __fs_reclaim_release(_RET_IP_); |
Daniel Vetter | f920e41 | 2020-12-14 19:08:30 -0800 | [diff] [blame] | 4567 | } |
Peter Zijlstra | d92a8cf | 2017-03-03 10:13:38 +0100 | [diff] [blame] | 4568 | } |
| 4569 | EXPORT_SYMBOL_GPL(fs_reclaim_release); |
| 4570 | #endif |
| 4571 | |
Marek Szyprowski | bba9071 | 2012-01-25 12:09:52 +0100 | [diff] [blame] | 4572 | /* Perform direct synchronous page reclaim */ |
Yanfei Xu | 2187e17 | 2020-10-13 16:55:54 -0700 | [diff] [blame] | 4573 | static unsigned long |
Vlastimil Babka | a926375 | 2015-02-11 15:25:41 -0800 | [diff] [blame] | 4574 | __perform_reclaim(gfp_t gfp_mask, unsigned int order, |
| 4575 | const struct alloc_context *ac) |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4576 | { |
Vlastimil Babka | 499118e | 2017-05-08 15:59:50 -0700 | [diff] [blame] | 4577 | unsigned int noreclaim_flag; |
Yanfei Xu | 2187e17 | 2020-10-13 16:55:54 -0700 | [diff] [blame] | 4578 | unsigned long pflags, progress; |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4579 | |
| 4580 | cond_resched(); |
| 4581 | |
| 4582 | /* We now go into synchronous reclaim */ |
| 4583 | cpuset_memory_pressure_bump(); |
Johannes Weiner | eb41468 | 2018-10-26 15:06:27 -0700 | [diff] [blame] | 4584 | psi_memstall_enter(&pflags); |
Peter Zijlstra | d92a8cf | 2017-03-03 10:13:38 +0100 | [diff] [blame] | 4585 | fs_reclaim_acquire(gfp_mask); |
Omar Sandoval | 9378132 | 2018-06-07 17:07:02 -0700 | [diff] [blame] | 4586 | noreclaim_flag = memalloc_noreclaim_save(); |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4587 | |
Vlastimil Babka | a926375 | 2015-02-11 15:25:41 -0800 | [diff] [blame] | 4588 | progress = try_to_free_pages(ac->zonelist, order, gfp_mask, |
| 4589 | ac->nodemask); |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4590 | |
Vlastimil Babka | 499118e | 2017-05-08 15:59:50 -0700 | [diff] [blame] | 4591 | memalloc_noreclaim_restore(noreclaim_flag); |
Omar Sandoval | 9378132 | 2018-06-07 17:07:02 -0700 | [diff] [blame] | 4592 | fs_reclaim_release(gfp_mask); |
Johannes Weiner | eb41468 | 2018-10-26 15:06:27 -0700 | [diff] [blame] | 4593 | psi_memstall_leave(&pflags); |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4594 | |
| 4595 | cond_resched(); |
| 4596 | |
Marek Szyprowski | bba9071 | 2012-01-25 12:09:52 +0100 | [diff] [blame] | 4597 | return progress; |
| 4598 | } |
| 4599 | |
| 4600 | /* The really slow allocator path where we enter direct reclaim */ |
| 4601 | static inline struct page * |
| 4602 | __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, |
Mel Gorman | c603844 | 2016-05-19 17:13:38 -0700 | [diff] [blame] | 4603 | unsigned int alloc_flags, const struct alloc_context *ac, |
Vlastimil Babka | a926375 | 2015-02-11 15:25:41 -0800 | [diff] [blame] | 4604 | unsigned long *did_some_progress) |
Marek Szyprowski | bba9071 | 2012-01-25 12:09:52 +0100 | [diff] [blame] | 4605 | { |
| 4606 | struct page *page = NULL; |
| 4607 | bool drained = false; |
| 4608 | |
Vlastimil Babka | a926375 | 2015-02-11 15:25:41 -0800 | [diff] [blame] | 4609 | *did_some_progress = __perform_reclaim(gfp_mask, order, ac); |
Mel Gorman | 9ee493c | 2010-09-09 16:38:18 -0700 | [diff] [blame] | 4610 | if (unlikely(!(*did_some_progress))) |
| 4611 | return NULL; |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4612 | |
Mel Gorman | 9ee493c | 2010-09-09 16:38:18 -0700 | [diff] [blame] | 4613 | retry: |
Vlastimil Babka | 31a6c19 | 2016-07-28 15:49:13 -0700 | [diff] [blame] | 4614 | page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); |
Mel Gorman | 9ee493c | 2010-09-09 16:38:18 -0700 | [diff] [blame] | 4615 | |
| 4616 | /* |
| 4617 | * If an allocation failed after direct reclaim, it could be because |
Mel Gorman | 0aaa29a | 2015-11-06 16:28:37 -0800 | [diff] [blame] | 4618 | * pages are pinned on the per-cpu lists or in high alloc reserves. |
Randy Dunlap | 047b996 | 2020-08-11 18:33:14 -0700 | [diff] [blame] | 4619 | * Shrink them and try again |
Mel Gorman | 9ee493c | 2010-09-09 16:38:18 -0700 | [diff] [blame] | 4620 | */ |
| 4621 | if (!page && !drained) { |
Minchan Kim | 29fac03 | 2016-12-12 16:42:14 -0800 | [diff] [blame] | 4622 | unreserve_highatomic_pageblock(ac, false); |
Vlastimil Babka | 93481ff | 2014-12-10 15:43:01 -0800 | [diff] [blame] | 4623 | drain_all_pages(NULL); |
Mel Gorman | 9ee493c | 2010-09-09 16:38:18 -0700 | [diff] [blame] | 4624 | drained = true; |
| 4625 | goto retry; |
| 4626 | } |
| 4627 | |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4628 | return page; |
| 4629 | } |
| 4630 | |
David Rientjes | 5ecd9d4 | 2018-04-05 16:25:16 -0700 | [diff] [blame] | 4631 | static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, |
| 4632 | const struct alloc_context *ac) |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4633 | { |
| 4634 | struct zoneref *z; |
| 4635 | struct zone *zone; |
Mel Gorman | e1a5563 | 2016-07-28 15:46:26 -0700 | [diff] [blame] | 4636 | pg_data_t *last_pgdat = NULL; |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 4637 | enum zone_type highest_zoneidx = ac->highest_zoneidx; |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4638 | |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 4639 | for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, |
David Rientjes | 5ecd9d4 | 2018-04-05 16:25:16 -0700 | [diff] [blame] | 4640 | ac->nodemask) { |
Mel Gorman | e1a5563 | 2016-07-28 15:46:26 -0700 | [diff] [blame] | 4641 | if (last_pgdat != zone->zone_pgdat) |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 4642 | wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); |
Mel Gorman | e1a5563 | 2016-07-28 15:46:26 -0700 | [diff] [blame] | 4643 | last_pgdat = zone->zone_pgdat; |
| 4644 | } |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4645 | } |
| 4646 | |
Mel Gorman | c603844 | 2016-05-19 17:13:38 -0700 | [diff] [blame] | 4647 | static inline unsigned int |
Peter Zijlstra | 341ce06 | 2009-06-16 15:32:02 -0700 | [diff] [blame] | 4648 | gfp_to_alloc_flags(gfp_t gfp_mask) |
| 4649 | { |
Mel Gorman | c603844 | 2016-05-19 17:13:38 -0700 | [diff] [blame] | 4650 | unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; |
Peter Zijlstra | 341ce06 | 2009-06-16 15:32:02 -0700 | [diff] [blame] | 4651 | |
Mateusz Nosek | 736838e | 2020-04-01 21:09:47 -0700 | [diff] [blame] | 4652 | /* |
| 4653 | * __GFP_HIGH is assumed to be the same as ALLOC_HIGH |
| 4654 | * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD |
| 4655 | * to save two branches. |
| 4656 | */ |
Namhyung Kim | e6223a3 | 2010-10-26 14:21:59 -0700 | [diff] [blame] | 4657 | BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); |
Mateusz Nosek | 736838e | 2020-04-01 21:09:47 -0700 | [diff] [blame] | 4658 | BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); |
Mel Gorman | a56f57f | 2009-06-16 15:32:02 -0700 | [diff] [blame] | 4659 | |
Peter Zijlstra | 341ce06 | 2009-06-16 15:32:02 -0700 | [diff] [blame] | 4660 | /* |
| 4661 | * The caller may dip into page reserves a bit more if the caller |
| 4662 | * cannot run direct reclaim, or if the caller has realtime scheduling |
| 4663 | * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will |
Mel Gorman | d0164ad | 2015-11-06 16:28:21 -0800 | [diff] [blame] | 4664 | * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH). |
Peter Zijlstra | 341ce06 | 2009-06-16 15:32:02 -0700 | [diff] [blame] | 4665 | */ |
Mateusz Nosek | 736838e | 2020-04-01 21:09:47 -0700 | [diff] [blame] | 4666 | alloc_flags |= (__force int) |
| 4667 | (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); |
Peter Zijlstra | 341ce06 | 2009-06-16 15:32:02 -0700 | [diff] [blame] | 4668 | |
Mel Gorman | d0164ad | 2015-11-06 16:28:21 -0800 | [diff] [blame] | 4669 | if (gfp_mask & __GFP_ATOMIC) { |
Andrea Arcangeli | 5c3240d | 2011-01-13 15:46:49 -0800 | [diff] [blame] | 4670 | /* |
David Rientjes | b104a35 | 2014-07-30 16:08:24 -0700 | [diff] [blame] | 4671 | * Not worth trying to allocate harder for __GFP_NOMEMALLOC even |
| 4672 | * if it can't schedule. |
Andrea Arcangeli | 5c3240d | 2011-01-13 15:46:49 -0800 | [diff] [blame] | 4673 | */ |
David Rientjes | b104a35 | 2014-07-30 16:08:24 -0700 | [diff] [blame] | 4674 | if (!(gfp_mask & __GFP_NOMEMALLOC)) |
Andrea Arcangeli | 5c3240d | 2011-01-13 15:46:49 -0800 | [diff] [blame] | 4675 | alloc_flags |= ALLOC_HARDER; |
Peter Zijlstra | 341ce06 | 2009-06-16 15:32:02 -0700 | [diff] [blame] | 4676 | /* |
David Rientjes | b104a35 | 2014-07-30 16:08:24 -0700 | [diff] [blame] | 4677 | * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the |
Vladimir Davydov | 344736f | 2014-10-20 15:50:30 +0400 | [diff] [blame] | 4678 | * comment for __cpuset_node_allowed(). |
Peter Zijlstra | 341ce06 | 2009-06-16 15:32:02 -0700 | [diff] [blame] | 4679 | */ |
| 4680 | alloc_flags &= ~ALLOC_CPUSET; |
Vasily Averin | 88dc6f20 | 2021-09-02 14:58:13 -0700 | [diff] [blame] | 4681 | } else if (unlikely(rt_task(current)) && in_task()) |
Peter Zijlstra | 341ce06 | 2009-06-16 15:32:02 -0700 | [diff] [blame] | 4682 | alloc_flags |= ALLOC_HARDER; |
| 4683 | |
Pavel Tatashin | 8e3560d | 2021-05-04 18:39:00 -0700 | [diff] [blame] | 4684 | alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); |
Joonsoo Kim | 8510e69 | 2020-08-06 23:26:04 -0700 | [diff] [blame] | 4685 | |
Peter Zijlstra | 341ce06 | 2009-06-16 15:32:02 -0700 | [diff] [blame] | 4686 | return alloc_flags; |
| 4687 | } |
| 4688 | |
Michal Hocko | cd04ae1 | 2017-09-06 16:24:50 -0700 | [diff] [blame] | 4689 | static bool oom_reserves_allowed(struct task_struct *tsk) |
Mel Gorman | 072bb0a | 2012-07-31 16:43:58 -0700 | [diff] [blame] | 4690 | { |
Michal Hocko | cd04ae1 | 2017-09-06 16:24:50 -0700 | [diff] [blame] | 4691 | if (!tsk_is_oom_victim(tsk)) |
Vlastimil Babka | 31a6c19 | 2016-07-28 15:49:13 -0700 | [diff] [blame] | 4692 | return false; |
| 4693 | |
Michal Hocko | cd04ae1 | 2017-09-06 16:24:50 -0700 | [diff] [blame] | 4694 | /* |
| 4695 | * !MMU doesn't have oom reaper so give access to memory reserves |
| 4696 | * only to the thread with TIF_MEMDIE set |
| 4697 | */ |
| 4698 | if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) |
| 4699 | return false; |
Vlastimil Babka | 31a6c19 | 2016-07-28 15:49:13 -0700 | [diff] [blame] | 4700 | |
Michal Hocko | cd04ae1 | 2017-09-06 16:24:50 -0700 | [diff] [blame] | 4701 | return true; |
| 4702 | } |
| 4703 | |
| 4704 | /* |
| 4705 | * Distinguish requests which really need access to full memory |
| 4706 | * reserves from oom victims which can live with a portion of it |
| 4707 | */ |
| 4708 | static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) |
| 4709 | { |
| 4710 | if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) |
| 4711 | return 0; |
| 4712 | if (gfp_mask & __GFP_MEMALLOC) |
| 4713 | return ALLOC_NO_WATERMARKS; |
| 4714 | if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) |
| 4715 | return ALLOC_NO_WATERMARKS; |
| 4716 | if (!in_interrupt()) { |
| 4717 | if (current->flags & PF_MEMALLOC) |
| 4718 | return ALLOC_NO_WATERMARKS; |
| 4719 | else if (oom_reserves_allowed(current)) |
| 4720 | return ALLOC_OOM; |
| 4721 | } |
| 4722 | |
| 4723 | return 0; |
| 4724 | } |
| 4725 | |
| 4726 | bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) |
| 4727 | { |
| 4728 | return !!__gfp_pfmemalloc_flags(gfp_mask); |
Mel Gorman | 072bb0a | 2012-07-31 16:43:58 -0700 | [diff] [blame] | 4729 | } |
| 4730 | |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 4731 | /* |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 4732 | * Checks whether it makes sense to retry the reclaim to make a forward progress |
| 4733 | * for the given allocation request. |
Johannes Weiner | 491d79a | 2017-05-03 14:52:16 -0700 | [diff] [blame] | 4734 | * |
| 4735 | * We give up when we either have tried MAX_RECLAIM_RETRIES in a row |
| 4736 | * without success, or when we couldn't even meet the watermark if we |
| 4737 | * reclaimed all remaining pages on the LRU lists. |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 4738 | * |
| 4739 | * Returns true if a retry is viable or false to enter the oom path. |
| 4740 | */ |
| 4741 | static inline bool |
| 4742 | should_reclaim_retry(gfp_t gfp_mask, unsigned order, |
| 4743 | struct alloc_context *ac, int alloc_flags, |
Vlastimil Babka | 423b452 | 2016-10-07 17:00:40 -0700 | [diff] [blame] | 4744 | bool did_some_progress, int *no_progress_loops) |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 4745 | { |
| 4746 | struct zone *zone; |
| 4747 | struct zoneref *z; |
Michal Hocko | 15f570b | 2018-10-26 15:03:31 -0700 | [diff] [blame] | 4748 | bool ret = false; |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 4749 | |
| 4750 | /* |
Vlastimil Babka | 423b452 | 2016-10-07 17:00:40 -0700 | [diff] [blame] | 4751 | * Costly allocations might have made a progress but this doesn't mean |
| 4752 | * their order will become available due to high fragmentation so |
| 4753 | * always increment the no progress counter for them |
| 4754 | */ |
| 4755 | if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) |
| 4756 | *no_progress_loops = 0; |
| 4757 | else |
| 4758 | (*no_progress_loops)++; |
| 4759 | |
| 4760 | /* |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 4761 | * Make sure we converge to OOM if we cannot make any progress |
| 4762 | * several times in the row. |
| 4763 | */ |
Minchan Kim | 04c8716 | 2016-12-12 16:42:11 -0800 | [diff] [blame] | 4764 | if (*no_progress_loops > MAX_RECLAIM_RETRIES) { |
| 4765 | /* Before OOM, exhaust highatomic_reserve */ |
Minchan Kim | 29fac03 | 2016-12-12 16:42:14 -0800 | [diff] [blame] | 4766 | return unreserve_highatomic_pageblock(ac, true); |
Minchan Kim | 04c8716 | 2016-12-12 16:42:11 -0800 | [diff] [blame] | 4767 | } |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 4768 | |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 4769 | /* |
Mel Gorman | bca6759 | 2016-07-28 15:47:05 -0700 | [diff] [blame] | 4770 | * Keep reclaiming pages while there is a chance this will lead |
| 4771 | * somewhere. If none of the target zones can satisfy our allocation |
| 4772 | * request even if all reclaimable pages are considered then we are |
| 4773 | * screwed and have to go OOM. |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 4774 | */ |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 4775 | for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, |
| 4776 | ac->highest_zoneidx, ac->nodemask) { |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 4777 | unsigned long available; |
Michal Hocko | ede3771 | 2016-05-20 16:57:03 -0700 | [diff] [blame] | 4778 | unsigned long reclaimable; |
Michal Hocko | d379f01 | 2017-02-22 15:42:00 -0800 | [diff] [blame] | 4779 | unsigned long min_wmark = min_wmark_pages(zone); |
| 4780 | bool wmark; |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 4781 | |
Mel Gorman | 5a1c84b | 2016-07-28 15:47:31 -0700 | [diff] [blame] | 4782 | available = reclaimable = zone_reclaimable_pages(zone); |
Mel Gorman | 5a1c84b | 2016-07-28 15:47:31 -0700 | [diff] [blame] | 4783 | available += zone_page_state_snapshot(zone, NR_FREE_PAGES); |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 4784 | |
| 4785 | /* |
Johannes Weiner | 491d79a | 2017-05-03 14:52:16 -0700 | [diff] [blame] | 4786 | * Would the allocation succeed if we reclaimed all |
| 4787 | * reclaimable pages? |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 4788 | */ |
Michal Hocko | d379f01 | 2017-02-22 15:42:00 -0800 | [diff] [blame] | 4789 | wmark = __zone_watermark_ok(zone, order, min_wmark, |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 4790 | ac->highest_zoneidx, alloc_flags, available); |
Michal Hocko | d379f01 | 2017-02-22 15:42:00 -0800 | [diff] [blame] | 4791 | trace_reclaim_retry_zone(z, order, reclaimable, |
| 4792 | available, min_wmark, *no_progress_loops, wmark); |
| 4793 | if (wmark) { |
Michal Hocko | 15f570b | 2018-10-26 15:03:31 -0700 | [diff] [blame] | 4794 | ret = true; |
Mel Gorman | 132b0d2 | 2021-11-05 13:42:38 -0700 | [diff] [blame] | 4795 | break; |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 4796 | } |
| 4797 | } |
| 4798 | |
Michal Hocko | 15f570b | 2018-10-26 15:03:31 -0700 | [diff] [blame] | 4799 | /* |
| 4800 | * Memory allocation/reclaim might be called from a WQ context and the |
| 4801 | * current implementation of the WQ concurrency control doesn't |
| 4802 | * recognize that a particular WQ is congested if the worker thread is |
| 4803 | * looping without ever sleeping. Therefore we have to do a short sleep |
| 4804 | * here rather than calling cond_resched(). |
| 4805 | */ |
| 4806 | if (current->flags & PF_WQ_WORKER) |
| 4807 | schedule_timeout_uninterruptible(1); |
| 4808 | else |
| 4809 | cond_resched(); |
| 4810 | return ret; |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 4811 | } |
| 4812 | |
Vlastimil Babka | 902b628 | 2017-07-06 15:39:56 -0700 | [diff] [blame] | 4813 | static inline bool |
| 4814 | check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) |
| 4815 | { |
| 4816 | /* |
| 4817 | * It's possible that cpuset's mems_allowed and the nodemask from |
| 4818 | * mempolicy don't intersect. This should be normally dealt with by |
| 4819 | * policy_nodemask(), but it's possible to race with cpuset update in |
| 4820 | * such a way the check therein was true, and then it became false |
| 4821 | * before we got our cpuset_mems_cookie here. |
| 4822 | * This assumes that for all allocations, ac->nodemask can come only |
| 4823 | * from MPOL_BIND mempolicy (whose documented semantics is to be ignored |
| 4824 | * when it does not intersect with the cpuset restrictions) or the |
| 4825 | * caller can deal with a violated nodemask. |
| 4826 | */ |
| 4827 | if (cpusets_enabled() && ac->nodemask && |
| 4828 | !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { |
| 4829 | ac->nodemask = NULL; |
| 4830 | return true; |
| 4831 | } |
| 4832 | |
| 4833 | /* |
| 4834 | * When updating a task's mems_allowed or mempolicy nodemask, it is |
| 4835 | * possible to race with parallel threads in such a way that our |
| 4836 | * allocation can fail while the mask is being updated. If we are about |
| 4837 | * to fail, check if the cpuset changed during allocation and if so, |
| 4838 | * retry. |
| 4839 | */ |
| 4840 | if (read_mems_allowed_retry(cpuset_mems_cookie)) |
| 4841 | return true; |
| 4842 | |
| 4843 | return false; |
| 4844 | } |
| 4845 | |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4846 | static inline struct page * |
| 4847 | __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, |
Vlastimil Babka | a926375 | 2015-02-11 15:25:41 -0800 | [diff] [blame] | 4848 | struct alloc_context *ac) |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4849 | { |
Mel Gorman | d0164ad | 2015-11-06 16:28:21 -0800 | [diff] [blame] | 4850 | bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; |
Vlastimil Babka | 282722b | 2017-05-08 15:54:49 -0700 | [diff] [blame] | 4851 | const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4852 | struct page *page = NULL; |
Mel Gorman | c603844 | 2016-05-19 17:13:38 -0700 | [diff] [blame] | 4853 | unsigned int alloc_flags; |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 4854 | unsigned long did_some_progress; |
Vlastimil Babka | 5ce9bfe | 2017-01-24 15:18:38 -0800 | [diff] [blame] | 4855 | enum compact_priority compact_priority; |
Michal Hocko | c5d01d0 | 2016-05-20 16:56:53 -0700 | [diff] [blame] | 4856 | enum compact_result compact_result; |
Vlastimil Babka | 5ce9bfe | 2017-01-24 15:18:38 -0800 | [diff] [blame] | 4857 | int compaction_retries; |
| 4858 | int no_progress_loops; |
Vlastimil Babka | 5ce9bfe | 2017-01-24 15:18:38 -0800 | [diff] [blame] | 4859 | unsigned int cpuset_mems_cookie; |
Michal Hocko | cd04ae1 | 2017-09-06 16:24:50 -0700 | [diff] [blame] | 4860 | int reserve_flags; |
Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 4861 | |
Christoph Lameter | 952f3b5 | 2006-12-06 20:33:26 -0800 | [diff] [blame] | 4862 | /* |
Mel Gorman | d0164ad | 2015-11-06 16:28:21 -0800 | [diff] [blame] | 4863 | * We also sanity check to catch abuse of atomic reserves being used by |
| 4864 | * callers that are not in atomic context. |
| 4865 | */ |
| 4866 | if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == |
| 4867 | (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) |
| 4868 | gfp_mask &= ~__GFP_ATOMIC; |
| 4869 | |
Vlastimil Babka | 5ce9bfe | 2017-01-24 15:18:38 -0800 | [diff] [blame] | 4870 | retry_cpuset: |
| 4871 | compaction_retries = 0; |
| 4872 | no_progress_loops = 0; |
| 4873 | compact_priority = DEF_COMPACT_PRIORITY; |
| 4874 | cpuset_mems_cookie = read_mems_allowed_begin(); |
Michal Hocko | 9a67f64 | 2017-02-22 15:46:19 -0800 | [diff] [blame] | 4875 | |
| 4876 | /* |
| 4877 | * The fast path uses conservative alloc_flags to succeed only until |
| 4878 | * kswapd needs to be woken up, and to avoid the cost of setting up |
| 4879 | * alloc_flags precisely. So we do that now. |
| 4880 | */ |
| 4881 | alloc_flags = gfp_to_alloc_flags(gfp_mask); |
| 4882 | |
Vlastimil Babka | e47483b | 2017-01-24 15:18:41 -0800 | [diff] [blame] | 4883 | /* |
| 4884 | * We need to recalculate the starting point for the zonelist iterator |
| 4885 | * because we might have used different nodemask in the fast path, or |
| 4886 | * there was a cpuset modification and we are retrying - otherwise we |
| 4887 | * could end up iterating over non-eligible zones endlessly. |
| 4888 | */ |
| 4889 | ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 4890 | ac->highest_zoneidx, ac->nodemask); |
Vlastimil Babka | e47483b | 2017-01-24 15:18:41 -0800 | [diff] [blame] | 4891 | if (!ac->preferred_zoneref->zone) |
| 4892 | goto nopage; |
| 4893 | |
Feng Tang | 8ca1b5a | 2021-11-05 13:40:34 -0700 | [diff] [blame] | 4894 | /* |
| 4895 | * Check for insane configurations where the cpuset doesn't contain |
| 4896 | * any suitable zone to satisfy the request - e.g. non-movable |
| 4897 | * GFP_HIGHUSER allocations from MOVABLE nodes only. |
| 4898 | */ |
| 4899 | if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { |
| 4900 | struct zoneref *z = first_zones_zonelist(ac->zonelist, |
| 4901 | ac->highest_zoneidx, |
| 4902 | &cpuset_current_mems_allowed); |
| 4903 | if (!z->zone) |
| 4904 | goto nopage; |
| 4905 | } |
| 4906 | |
Mel Gorman | 0a79cda | 2018-12-28 00:35:48 -0800 | [diff] [blame] | 4907 | if (alloc_flags & ALLOC_KSWAPD) |
David Rientjes | 5ecd9d4 | 2018-04-05 16:25:16 -0700 | [diff] [blame] | 4908 | wake_all_kswapds(order, gfp_mask, ac); |
Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 4909 | |
Paul Jackson | 9bf2229 | 2005-09-06 15:18:12 -0700 | [diff] [blame] | 4910 | /* |
Vlastimil Babka | 2377123 | 2016-07-28 15:49:16 -0700 | [diff] [blame] | 4911 | * The adjusted alloc_flags might result in immediate success, so try |
| 4912 | * that first |
| 4913 | */ |
| 4914 | page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); |
| 4915 | if (page) |
| 4916 | goto got_pg; |
| 4917 | |
Vlastimil Babka | a8161d1 | 2016-07-28 15:49:19 -0700 | [diff] [blame] | 4918 | /* |
| 4919 | * For costly allocations, try direct compaction first, as it's likely |
Vlastimil Babka | 282722b | 2017-05-08 15:54:49 -0700 | [diff] [blame] | 4920 | * that we have enough base pages and don't need to reclaim. For non- |
| 4921 | * movable high-order allocations, do that as well, as compaction will |
| 4922 | * try prevent permanent fragmentation by migrating from blocks of the |
| 4923 | * same migratetype. |
| 4924 | * Don't try this for allocations that are allowed to ignore |
| 4925 | * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen. |
Vlastimil Babka | a8161d1 | 2016-07-28 15:49:19 -0700 | [diff] [blame] | 4926 | */ |
Vlastimil Babka | 282722b | 2017-05-08 15:54:49 -0700 | [diff] [blame] | 4927 | if (can_direct_reclaim && |
| 4928 | (costly_order || |
| 4929 | (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) |
| 4930 | && !gfp_pfmemalloc_allowed(gfp_mask)) { |
Vlastimil Babka | a8161d1 | 2016-07-28 15:49:19 -0700 | [diff] [blame] | 4931 | page = __alloc_pages_direct_compact(gfp_mask, order, |
| 4932 | alloc_flags, ac, |
Vlastimil Babka | a5508cd | 2016-07-28 15:49:28 -0700 | [diff] [blame] | 4933 | INIT_COMPACT_PRIORITY, |
Vlastimil Babka | a8161d1 | 2016-07-28 15:49:19 -0700 | [diff] [blame] | 4934 | &compact_result); |
| 4935 | if (page) |
| 4936 | goto got_pg; |
| 4937 | |
Vlastimil Babka | cc638f3 | 2020-01-13 16:29:04 -0800 | [diff] [blame] | 4938 | /* |
| 4939 | * Checks for costly allocations with __GFP_NORETRY, which |
| 4940 | * includes some THP page fault allocations |
| 4941 | */ |
| 4942 | if (costly_order && (gfp_mask & __GFP_NORETRY)) { |
David Rientjes | b39d0ee | 2019-09-04 12:54:22 -0700 | [diff] [blame] | 4943 | /* |
| 4944 | * If allocating entire pageblock(s) and compaction |
| 4945 | * failed because all zones are below low watermarks |
| 4946 | * or is prohibited because it recently failed at this |
David Rientjes | 3f36d86 | 2019-10-14 14:12:04 -0700 | [diff] [blame] | 4947 | * order, fail immediately unless the allocator has |
| 4948 | * requested compaction and reclaim retry. |
David Rientjes | b39d0ee | 2019-09-04 12:54:22 -0700 | [diff] [blame] | 4949 | * |
| 4950 | * Reclaim is |
| 4951 | * - potentially very expensive because zones are far |
| 4952 | * below their low watermarks or this is part of very |
| 4953 | * bursty high order allocations, |
| 4954 | * - not guaranteed to help because isolate_freepages() |
| 4955 | * may not iterate over freed pages as part of its |
| 4956 | * linear scan, and |
| 4957 | * - unlikely to make entire pageblocks free on its |
| 4958 | * own. |
| 4959 | */ |
| 4960 | if (compact_result == COMPACT_SKIPPED || |
| 4961 | compact_result == COMPACT_DEFERRED) |
| 4962 | goto nopage; |
Vlastimil Babka | a8161d1 | 2016-07-28 15:49:19 -0700 | [diff] [blame] | 4963 | |
| 4964 | /* |
Vlastimil Babka | 3eb2771 | 2016-07-28 15:49:22 -0700 | [diff] [blame] | 4965 | * Looks like reclaim/compaction is worth trying, but |
| 4966 | * sync compaction could be very expensive, so keep |
Vlastimil Babka | 2516035 | 2016-07-28 15:49:25 -0700 | [diff] [blame] | 4967 | * using async compaction. |
Vlastimil Babka | a8161d1 | 2016-07-28 15:49:19 -0700 | [diff] [blame] | 4968 | */ |
Vlastimil Babka | a5508cd | 2016-07-28 15:49:28 -0700 | [diff] [blame] | 4969 | compact_priority = INIT_COMPACT_PRIORITY; |
Vlastimil Babka | a8161d1 | 2016-07-28 15:49:19 -0700 | [diff] [blame] | 4970 | } |
| 4971 | } |
Vlastimil Babka | 2377123 | 2016-07-28 15:49:16 -0700 | [diff] [blame] | 4972 | |
| 4973 | retry: |
| 4974 | /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ |
Mel Gorman | 0a79cda | 2018-12-28 00:35:48 -0800 | [diff] [blame] | 4975 | if (alloc_flags & ALLOC_KSWAPD) |
David Rientjes | 5ecd9d4 | 2018-04-05 16:25:16 -0700 | [diff] [blame] | 4976 | wake_all_kswapds(order, gfp_mask, ac); |
Vlastimil Babka | 2377123 | 2016-07-28 15:49:16 -0700 | [diff] [blame] | 4977 | |
Michal Hocko | cd04ae1 | 2017-09-06 16:24:50 -0700 | [diff] [blame] | 4978 | reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); |
| 4979 | if (reserve_flags) |
Pavel Tatashin | 8e3560d | 2021-05-04 18:39:00 -0700 | [diff] [blame] | 4980 | alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags); |
Vlastimil Babka | 2377123 | 2016-07-28 15:49:16 -0700 | [diff] [blame] | 4981 | |
| 4982 | /* |
Vlastimil Babka | d6a24df | 2018-08-17 15:45:05 -0700 | [diff] [blame] | 4983 | * Reset the nodemask and zonelist iterators if memory policies can be |
| 4984 | * ignored. These allocations are high priority and system rather than |
| 4985 | * user oriented. |
Mel Gorman | e46e7b7 | 2016-06-03 14:56:01 -0700 | [diff] [blame] | 4986 | */ |
Michal Hocko | cd04ae1 | 2017-09-06 16:24:50 -0700 | [diff] [blame] | 4987 | if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { |
Vlastimil Babka | d6a24df | 2018-08-17 15:45:05 -0700 | [diff] [blame] | 4988 | ac->nodemask = NULL; |
Mel Gorman | e46e7b7 | 2016-06-03 14:56:01 -0700 | [diff] [blame] | 4989 | ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 4990 | ac->highest_zoneidx, ac->nodemask); |
Mel Gorman | e46e7b7 | 2016-06-03 14:56:01 -0700 | [diff] [blame] | 4991 | } |
| 4992 | |
Vlastimil Babka | 2377123 | 2016-07-28 15:49:16 -0700 | [diff] [blame] | 4993 | /* Attempt with potentially adjusted zonelist and alloc_flags */ |
Vlastimil Babka | 31a6c19 | 2016-07-28 15:49:13 -0700 | [diff] [blame] | 4994 | page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); |
Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 4995 | if (page) |
| 4996 | goto got_pg; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4997 | |
Mel Gorman | d0164ad | 2015-11-06 16:28:21 -0800 | [diff] [blame] | 4998 | /* Caller is not willing to reclaim, we can't balance anything */ |
Michal Hocko | 9a67f64 | 2017-02-22 15:46:19 -0800 | [diff] [blame] | 4999 | if (!can_direct_reclaim) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5000 | goto nopage; |
Michal Hocko | 9a67f64 | 2017-02-22 15:46:19 -0800 | [diff] [blame] | 5001 | |
Peter Zijlstra | 341ce06 | 2009-06-16 15:32:02 -0700 | [diff] [blame] | 5002 | /* Avoid recursion of direct reclaim */ |
Michal Hocko | 9a67f64 | 2017-02-22 15:46:19 -0800 | [diff] [blame] | 5003 | if (current->flags & PF_MEMALLOC) |
Peter Zijlstra | 341ce06 | 2009-06-16 15:32:02 -0700 | [diff] [blame] | 5004 | goto nopage; |
David Rientjes | 8fe7804 | 2014-08-06 16:07:54 -0700 | [diff] [blame] | 5005 | |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 5006 | /* Try direct reclaim and then allocating */ |
Vlastimil Babka | a926375 | 2015-02-11 15:25:41 -0800 | [diff] [blame] | 5007 | page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, |
| 5008 | &did_some_progress); |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 5009 | if (page) |
| 5010 | goto got_pg; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5011 | |
Vlastimil Babka | a8161d1 | 2016-07-28 15:49:19 -0700 | [diff] [blame] | 5012 | /* Try direct compaction and then allocating */ |
| 5013 | page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, |
Vlastimil Babka | a5508cd | 2016-07-28 15:49:28 -0700 | [diff] [blame] | 5014 | compact_priority, &compact_result); |
Vlastimil Babka | a8161d1 | 2016-07-28 15:49:19 -0700 | [diff] [blame] | 5015 | if (page) |
| 5016 | goto got_pg; |
| 5017 | |
Johannes Weiner | 9083905 | 2015-06-24 16:57:21 -0700 | [diff] [blame] | 5018 | /* Do not loop if specifically requested */ |
| 5019 | if (gfp_mask & __GFP_NORETRY) |
Vlastimil Babka | a8161d1 | 2016-07-28 15:49:19 -0700 | [diff] [blame] | 5020 | goto nopage; |
Johannes Weiner | 9083905 | 2015-06-24 16:57:21 -0700 | [diff] [blame] | 5021 | |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 5022 | /* |
| 5023 | * Do not retry costly high order allocations unless they are |
Michal Hocko | dcda9b0 | 2017-07-12 14:36:45 -0700 | [diff] [blame] | 5024 | * __GFP_RETRY_MAYFAIL |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 5025 | */ |
Michal Hocko | dcda9b0 | 2017-07-12 14:36:45 -0700 | [diff] [blame] | 5026 | if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL)) |
Vlastimil Babka | a8161d1 | 2016-07-28 15:49:19 -0700 | [diff] [blame] | 5027 | goto nopage; |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 5028 | |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 5029 | if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, |
Vlastimil Babka | 423b452 | 2016-10-07 17:00:40 -0700 | [diff] [blame] | 5030 | did_some_progress > 0, &no_progress_loops)) |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 5031 | goto retry; |
| 5032 | |
Michal Hocko | 33c2d21 | 2016-05-20 16:57:06 -0700 | [diff] [blame] | 5033 | /* |
| 5034 | * It doesn't make any sense to retry for the compaction if the order-0 |
| 5035 | * reclaim is not able to make any progress because the current |
| 5036 | * implementation of the compaction depends on the sufficient amount |
| 5037 | * of free memory (see __compaction_suitable) |
| 5038 | */ |
| 5039 | if (did_some_progress > 0 && |
Michal Hocko | 86a294a | 2016-05-20 16:57:12 -0700 | [diff] [blame] | 5040 | should_compact_retry(ac, order, alloc_flags, |
Vlastimil Babka | a5508cd | 2016-07-28 15:49:28 -0700 | [diff] [blame] | 5041 | compact_result, &compact_priority, |
Vlastimil Babka | d943649 | 2016-10-07 17:00:31 -0700 | [diff] [blame] | 5042 | &compaction_retries)) |
Michal Hocko | 33c2d21 | 2016-05-20 16:57:06 -0700 | [diff] [blame] | 5043 | goto retry; |
| 5044 | |
Vlastimil Babka | 902b628 | 2017-07-06 15:39:56 -0700 | [diff] [blame] | 5045 | |
| 5046 | /* Deal with possible cpuset update races before we start OOM killing */ |
| 5047 | if (check_retry_cpuset(cpuset_mems_cookie, ac)) |
Vlastimil Babka | e47483b | 2017-01-24 15:18:41 -0800 | [diff] [blame] | 5048 | goto retry_cpuset; |
| 5049 | |
Johannes Weiner | 9083905 | 2015-06-24 16:57:21 -0700 | [diff] [blame] | 5050 | /* Reclaim has failed us, start killing things */ |
| 5051 | page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); |
| 5052 | if (page) |
| 5053 | goto got_pg; |
| 5054 | |
Michal Hocko | 9a67f64 | 2017-02-22 15:46:19 -0800 | [diff] [blame] | 5055 | /* Avoid allocations with no watermarks from looping endlessly */ |
Michal Hocko | cd04ae1 | 2017-09-06 16:24:50 -0700 | [diff] [blame] | 5056 | if (tsk_is_oom_victim(current) && |
Joonsoo Kim | 8510e69 | 2020-08-06 23:26:04 -0700 | [diff] [blame] | 5057 | (alloc_flags & ALLOC_OOM || |
Tetsuo Handa | c288983 | 2017-06-02 14:46:31 -0700 | [diff] [blame] | 5058 | (gfp_mask & __GFP_NOMEMALLOC))) |
Michal Hocko | 9a67f64 | 2017-02-22 15:46:19 -0800 | [diff] [blame] | 5059 | goto nopage; |
| 5060 | |
Johannes Weiner | 9083905 | 2015-06-24 16:57:21 -0700 | [diff] [blame] | 5061 | /* Retry as long as the OOM killer is making progress */ |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 5062 | if (did_some_progress) { |
| 5063 | no_progress_loops = 0; |
Johannes Weiner | 9083905 | 2015-06-24 16:57:21 -0700 | [diff] [blame] | 5064 | goto retry; |
Michal Hocko | 0a0337e | 2016-05-20 16:57:00 -0700 | [diff] [blame] | 5065 | } |
Johannes Weiner | 9083905 | 2015-06-24 16:57:21 -0700 | [diff] [blame] | 5066 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5067 | nopage: |
Vlastimil Babka | 902b628 | 2017-07-06 15:39:56 -0700 | [diff] [blame] | 5068 | /* Deal with possible cpuset update races before we fail */ |
| 5069 | if (check_retry_cpuset(cpuset_mems_cookie, ac)) |
Vlastimil Babka | 5ce9bfe | 2017-01-24 15:18:38 -0800 | [diff] [blame] | 5070 | goto retry_cpuset; |
| 5071 | |
Michal Hocko | 9a67f64 | 2017-02-22 15:46:19 -0800 | [diff] [blame] | 5072 | /* |
| 5073 | * Make sure that __GFP_NOFAIL request doesn't leak out and make sure |
| 5074 | * we always retry |
| 5075 | */ |
| 5076 | if (gfp_mask & __GFP_NOFAIL) { |
| 5077 | /* |
| 5078 | * All existing users of the __GFP_NOFAIL are blockable, so warn |
| 5079 | * of any new users that actually require GFP_NOWAIT |
| 5080 | */ |
| 5081 | if (WARN_ON_ONCE(!can_direct_reclaim)) |
| 5082 | goto fail; |
| 5083 | |
| 5084 | /* |
| 5085 | * PF_MEMALLOC request from this context is rather bizarre |
| 5086 | * because we cannot reclaim anything and only can loop waiting |
| 5087 | * for somebody to do a work for us |
| 5088 | */ |
| 5089 | WARN_ON_ONCE(current->flags & PF_MEMALLOC); |
| 5090 | |
| 5091 | /* |
| 5092 | * non failing costly orders are a hard requirement which we |
| 5093 | * are not prepared for much so let's warn about these users |
| 5094 | * so that we can identify them and convert them to something |
| 5095 | * else. |
| 5096 | */ |
| 5097 | WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER); |
| 5098 | |
Michal Hocko | 6c18ba7 | 2017-02-22 15:46:25 -0800 | [diff] [blame] | 5099 | /* |
| 5100 | * Help non-failing allocations by giving them access to memory |
| 5101 | * reserves but do not use ALLOC_NO_WATERMARKS because this |
| 5102 | * could deplete whole memory reserves which would just make |
| 5103 | * the situation worse |
| 5104 | */ |
| 5105 | page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); |
| 5106 | if (page) |
| 5107 | goto got_pg; |
| 5108 | |
Michal Hocko | 9a67f64 | 2017-02-22 15:46:19 -0800 | [diff] [blame] | 5109 | cond_resched(); |
| 5110 | goto retry; |
| 5111 | } |
| 5112 | fail: |
Michal Hocko | a8e9925 | 2017-02-22 15:46:10 -0800 | [diff] [blame] | 5113 | warn_alloc(gfp_mask, ac->nodemask, |
Michal Hocko | 7877cdc | 2016-10-07 17:01:55 -0700 | [diff] [blame] | 5114 | "page allocation failure: order:%u", order); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5115 | got_pg: |
Mel Gorman | 072bb0a | 2012-07-31 16:43:58 -0700 | [diff] [blame] | 5116 | return page; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5117 | } |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 5118 | |
Mel Gorman | 9cd7555 | 2017-02-24 14:56:29 -0800 | [diff] [blame] | 5119 | static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, |
Vlastimil Babka | 04ec626 | 2017-07-06 15:40:03 -0700 | [diff] [blame] | 5120 | int preferred_nid, nodemask_t *nodemask, |
Matthew Wilcox (Oracle) | 8e6a930 | 2021-04-29 23:01:10 -0700 | [diff] [blame] | 5121 | struct alloc_context *ac, gfp_t *alloc_gfp, |
Mel Gorman | 9cd7555 | 2017-02-24 14:56:29 -0800 | [diff] [blame] | 5122 | unsigned int *alloc_flags) |
| 5123 | { |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 5124 | ac->highest_zoneidx = gfp_zone(gfp_mask); |
Vlastimil Babka | 04ec626 | 2017-07-06 15:40:03 -0700 | [diff] [blame] | 5125 | ac->zonelist = node_zonelist(preferred_nid, gfp_mask); |
Mel Gorman | 9cd7555 | 2017-02-24 14:56:29 -0800 | [diff] [blame] | 5126 | ac->nodemask = nodemask; |
Wei Yang | 01c0bfe | 2020-06-03 15:59:08 -0700 | [diff] [blame] | 5127 | ac->migratetype = gfp_migratetype(gfp_mask); |
Mel Gorman | 9cd7555 | 2017-02-24 14:56:29 -0800 | [diff] [blame] | 5128 | |
| 5129 | if (cpusets_enabled()) { |
Matthew Wilcox (Oracle) | 8e6a930 | 2021-04-29 23:01:10 -0700 | [diff] [blame] | 5130 | *alloc_gfp |= __GFP_HARDWALL; |
Muchun Song | 182f3d7 | 2020-08-06 23:26:01 -0700 | [diff] [blame] | 5131 | /* |
| 5132 | * When we are in the interrupt context, it is irrelevant |
| 5133 | * to the current task context. It means that any node ok. |
| 5134 | */ |
Vasily Averin | 88dc6f20 | 2021-09-02 14:58:13 -0700 | [diff] [blame] | 5135 | if (in_task() && !ac->nodemask) |
Mel Gorman | 9cd7555 | 2017-02-24 14:56:29 -0800 | [diff] [blame] | 5136 | ac->nodemask = &cpuset_current_mems_allowed; |
Vlastimil Babka | 5104782 | 2017-02-24 14:56:53 -0800 | [diff] [blame] | 5137 | else |
| 5138 | *alloc_flags |= ALLOC_CPUSET; |
Mel Gorman | 9cd7555 | 2017-02-24 14:56:29 -0800 | [diff] [blame] | 5139 | } |
| 5140 | |
Peter Zijlstra | d92a8cf | 2017-03-03 10:13:38 +0100 | [diff] [blame] | 5141 | fs_reclaim_acquire(gfp_mask); |
| 5142 | fs_reclaim_release(gfp_mask); |
Mel Gorman | 9cd7555 | 2017-02-24 14:56:29 -0800 | [diff] [blame] | 5143 | |
| 5144 | might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); |
| 5145 | |
| 5146 | if (should_fail_alloc_page(gfp_mask, order)) |
| 5147 | return false; |
| 5148 | |
Pavel Tatashin | 8e3560d | 2021-05-04 18:39:00 -0700 | [diff] [blame] | 5149 | *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); |
Joonsoo Kim | d883c6c | 2018-05-23 10:18:21 +0900 | [diff] [blame] | 5150 | |
Mel Gorman | 9cd7555 | 2017-02-24 14:56:29 -0800 | [diff] [blame] | 5151 | /* Dirty zone balancing only done in the fast path */ |
| 5152 | ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); |
| 5153 | |
| 5154 | /* |
| 5155 | * The preferred zone is used for statistics but crucially it is |
| 5156 | * also used as the starting point for the zonelist iterator. It |
| 5157 | * may get reset for allocations that ignore memory policies. |
| 5158 | */ |
| 5159 | ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 5160 | ac->highest_zoneidx, ac->nodemask); |
Mateusz Nosek | a0622d0 | 2020-10-13 16:55:51 -0700 | [diff] [blame] | 5161 | |
| 5162 | return true; |
Mel Gorman | 9cd7555 | 2017-02-24 14:56:29 -0800 | [diff] [blame] | 5163 | } |
| 5164 | |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 5165 | /* |
Mel Gorman | 0f87d9d | 2021-04-29 23:01:48 -0700 | [diff] [blame] | 5166 | * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5167 | * @gfp: GFP flags for the allocation |
| 5168 | * @preferred_nid: The preferred NUMA node ID to allocate from |
| 5169 | * @nodemask: Set of nodes to allocate from, may be NULL |
Mel Gorman | 0f87d9d | 2021-04-29 23:01:48 -0700 | [diff] [blame] | 5170 | * @nr_pages: The number of pages desired on the list or array |
| 5171 | * @page_list: Optional list to store the allocated pages |
| 5172 | * @page_array: Optional array to store the pages |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5173 | * |
| 5174 | * This is a batched version of the page allocator that attempts to |
Mel Gorman | 0f87d9d | 2021-04-29 23:01:48 -0700 | [diff] [blame] | 5175 | * allocate nr_pages quickly. Pages are added to page_list if page_list |
| 5176 | * is not NULL, otherwise it is assumed that the page_array is valid. |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5177 | * |
Mel Gorman | 0f87d9d | 2021-04-29 23:01:48 -0700 | [diff] [blame] | 5178 | * For lists, nr_pages is the number of pages that should be allocated. |
| 5179 | * |
| 5180 | * For arrays, only NULL elements are populated with pages and nr_pages |
| 5181 | * is the maximum number of pages that will be stored in the array. |
| 5182 | * |
| 5183 | * Returns the number of pages on the list or array. |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5184 | */ |
| 5185 | unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, |
| 5186 | nodemask_t *nodemask, int nr_pages, |
Mel Gorman | 0f87d9d | 2021-04-29 23:01:48 -0700 | [diff] [blame] | 5187 | struct list_head *page_list, |
| 5188 | struct page **page_array) |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5189 | { |
| 5190 | struct page *page; |
| 5191 | unsigned long flags; |
| 5192 | struct zone *zone; |
| 5193 | struct zoneref *z; |
| 5194 | struct per_cpu_pages *pcp; |
| 5195 | struct list_head *pcp_list; |
| 5196 | struct alloc_context ac; |
| 5197 | gfp_t alloc_gfp; |
| 5198 | unsigned int alloc_flags = ALLOC_WMARK_LOW; |
Mel Gorman | 3e23060 | 2021-06-28 19:41:50 -0700 | [diff] [blame] | 5199 | int nr_populated = 0, nr_account = 0; |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5200 | |
Mel Gorman | 0f87d9d | 2021-04-29 23:01:48 -0700 | [diff] [blame] | 5201 | /* |
| 5202 | * Skip populated array elements to determine if any pages need |
| 5203 | * to be allocated before disabling IRQs. |
| 5204 | */ |
Rasmus Villemoes | b08e50d | 2021-06-24 18:40:04 -0700 | [diff] [blame] | 5205 | while (page_array && nr_populated < nr_pages && page_array[nr_populated]) |
Mel Gorman | 0f87d9d | 2021-04-29 23:01:48 -0700 | [diff] [blame] | 5206 | nr_populated++; |
| 5207 | |
Chuck Lever | 0614784 | 2021-07-14 21:26:52 -0700 | [diff] [blame] | 5208 | /* No pages requested? */ |
| 5209 | if (unlikely(nr_pages <= 0)) |
| 5210 | goto out; |
| 5211 | |
Mel Gorman | b3b64eb | 2021-06-24 18:40:07 -0700 | [diff] [blame] | 5212 | /* Already populated array? */ |
| 5213 | if (unlikely(page_array && nr_pages - nr_populated == 0)) |
Chuck Lever | 0614784 | 2021-07-14 21:26:52 -0700 | [diff] [blame] | 5214 | goto out; |
Mel Gorman | b3b64eb | 2021-06-24 18:40:07 -0700 | [diff] [blame] | 5215 | |
Shakeel Butt | 8dcb306 | 2021-10-28 14:36:04 -0700 | [diff] [blame] | 5216 | /* Bulk allocator does not support memcg accounting. */ |
| 5217 | if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT)) |
| 5218 | goto failed; |
| 5219 | |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5220 | /* Use the single page allocator for one page. */ |
Mel Gorman | 0f87d9d | 2021-04-29 23:01:48 -0700 | [diff] [blame] | 5221 | if (nr_pages - nr_populated == 1) |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5222 | goto failed; |
| 5223 | |
Mel Gorman | 187ad46 | 2021-07-14 21:26:46 -0700 | [diff] [blame] | 5224 | #ifdef CONFIG_PAGE_OWNER |
| 5225 | /* |
| 5226 | * PAGE_OWNER may recurse into the allocator to allocate space to |
| 5227 | * save the stack with pagesets.lock held. Releasing/reacquiring |
| 5228 | * removes much of the performance benefit of bulk allocation so |
| 5229 | * force the caller to allocate one page at a time as it'll have |
| 5230 | * similar performance to added complexity to the bulk allocator. |
| 5231 | */ |
| 5232 | if (static_branch_unlikely(&page_owner_inited)) |
| 5233 | goto failed; |
| 5234 | #endif |
| 5235 | |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5236 | /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ |
| 5237 | gfp &= gfp_allowed_mask; |
| 5238 | alloc_gfp = gfp; |
| 5239 | if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) |
Chuck Lever | 0614784 | 2021-07-14 21:26:52 -0700 | [diff] [blame] | 5240 | goto out; |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5241 | gfp = alloc_gfp; |
| 5242 | |
| 5243 | /* Find an allowed local zone that meets the low watermark. */ |
| 5244 | for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { |
| 5245 | unsigned long mark; |
| 5246 | |
| 5247 | if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && |
| 5248 | !__cpuset_zone_allowed(zone, gfp)) { |
| 5249 | continue; |
| 5250 | } |
| 5251 | |
| 5252 | if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone && |
| 5253 | zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) { |
| 5254 | goto failed; |
| 5255 | } |
| 5256 | |
| 5257 | mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; |
| 5258 | if (zone_watermark_fast(zone, 0, mark, |
| 5259 | zonelist_zone_idx(ac.preferred_zoneref), |
| 5260 | alloc_flags, gfp)) { |
| 5261 | break; |
| 5262 | } |
| 5263 | } |
| 5264 | |
| 5265 | /* |
| 5266 | * If there are no allowed local zones that meets the watermarks then |
| 5267 | * try to allocate a single page and reclaim if necessary. |
| 5268 | */ |
Jesper Dangaard Brouer | ce76f9a | 2021-04-29 23:01:51 -0700 | [diff] [blame] | 5269 | if (unlikely(!zone)) |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5270 | goto failed; |
| 5271 | |
| 5272 | /* Attempt the batch allocation */ |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 5273 | local_lock_irqsave(&pagesets.lock, flags); |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 5274 | pcp = this_cpu_ptr(zone->per_cpu_pageset); |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 5275 | pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5276 | |
Mel Gorman | 0f87d9d | 2021-04-29 23:01:48 -0700 | [diff] [blame] | 5277 | while (nr_populated < nr_pages) { |
| 5278 | |
| 5279 | /* Skip existing pages */ |
| 5280 | if (page_array && page_array[nr_populated]) { |
| 5281 | nr_populated++; |
| 5282 | continue; |
| 5283 | } |
| 5284 | |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 5285 | page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5286 | pcp, pcp_list); |
Jesper Dangaard Brouer | ce76f9a | 2021-04-29 23:01:51 -0700 | [diff] [blame] | 5287 | if (unlikely(!page)) { |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5288 | /* Try and get at least one page */ |
Mel Gorman | 0f87d9d | 2021-04-29 23:01:48 -0700 | [diff] [blame] | 5289 | if (!nr_populated) |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5290 | goto failed_irq; |
| 5291 | break; |
| 5292 | } |
Mel Gorman | 3e23060 | 2021-06-28 19:41:50 -0700 | [diff] [blame] | 5293 | nr_account++; |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5294 | |
| 5295 | prep_new_page(page, 0, gfp, 0); |
Mel Gorman | 0f87d9d | 2021-04-29 23:01:48 -0700 | [diff] [blame] | 5296 | if (page_list) |
| 5297 | list_add(&page->lru, page_list); |
| 5298 | else |
| 5299 | page_array[nr_populated] = page; |
| 5300 | nr_populated++; |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5301 | } |
| 5302 | |
Mel Gorman | 43c95bc | 2021-06-28 19:41:54 -0700 | [diff] [blame] | 5303 | local_unlock_irqrestore(&pagesets.lock, flags); |
| 5304 | |
Mel Gorman | 3e23060 | 2021-06-28 19:41:50 -0700 | [diff] [blame] | 5305 | __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); |
| 5306 | zone_statistics(ac.preferred_zoneref->zone, zone, nr_account); |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5307 | |
Chuck Lever | 0614784 | 2021-07-14 21:26:52 -0700 | [diff] [blame] | 5308 | out: |
Mel Gorman | 0f87d9d | 2021-04-29 23:01:48 -0700 | [diff] [blame] | 5309 | return nr_populated; |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5310 | |
| 5311 | failed_irq: |
Mel Gorman | dbbee9d | 2021-06-28 19:41:41 -0700 | [diff] [blame] | 5312 | local_unlock_irqrestore(&pagesets.lock, flags); |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5313 | |
| 5314 | failed: |
| 5315 | page = __alloc_pages(gfp, 0, preferred_nid, nodemask); |
| 5316 | if (page) { |
Mel Gorman | 0f87d9d | 2021-04-29 23:01:48 -0700 | [diff] [blame] | 5317 | if (page_list) |
| 5318 | list_add(&page->lru, page_list); |
| 5319 | else |
| 5320 | page_array[nr_populated] = page; |
| 5321 | nr_populated++; |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5322 | } |
| 5323 | |
Chuck Lever | 0614784 | 2021-07-14 21:26:52 -0700 | [diff] [blame] | 5324 | goto out; |
Mel Gorman | 387ba26f | 2021-04-29 23:01:45 -0700 | [diff] [blame] | 5325 | } |
| 5326 | EXPORT_SYMBOL_GPL(__alloc_pages_bulk); |
| 5327 | |
| 5328 | /* |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 5329 | * This is the 'heart' of the zoned buddy allocator. |
| 5330 | */ |
Matthew Wilcox (Oracle) | 84172f4 | 2021-04-29 23:01:15 -0700 | [diff] [blame] | 5331 | struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, |
Vlastimil Babka | 04ec626 | 2017-07-06 15:40:03 -0700 | [diff] [blame] | 5332 | nodemask_t *nodemask) |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 5333 | { |
Mel Gorman | 5bb1b16 | 2016-05-19 17:13:50 -0700 | [diff] [blame] | 5334 | struct page *page; |
Mel Gorman | e6cbd7f | 2016-07-28 15:46:50 -0700 | [diff] [blame] | 5335 | unsigned int alloc_flags = ALLOC_WMARK_LOW; |
Matthew Wilcox (Oracle) | 8e6a930 | 2021-04-29 23:01:10 -0700 | [diff] [blame] | 5336 | gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ |
Mel Gorman | 9cd7555 | 2017-02-24 14:56:29 -0800 | [diff] [blame] | 5337 | struct alloc_context ac = { }; |
Mel Gorman | 682a338 | 2016-05-19 17:13:30 -0700 | [diff] [blame] | 5338 | |
Michal Hocko | c63ae43 | 2018-11-16 15:08:53 -0800 | [diff] [blame] | 5339 | /* |
| 5340 | * There are several places where we assume that the order value is sane |
| 5341 | * so bail out early if the request is out of bound. |
| 5342 | */ |
| 5343 | if (unlikely(order >= MAX_ORDER)) { |
Matthew Wilcox (Oracle) | 6e5e0f2 | 2021-04-29 23:01:13 -0700 | [diff] [blame] | 5344 | WARN_ON_ONCE(!(gfp & __GFP_NOWARN)); |
Michal Hocko | c63ae43 | 2018-11-16 15:08:53 -0800 | [diff] [blame] | 5345 | return NULL; |
| 5346 | } |
| 5347 | |
Matthew Wilcox (Oracle) | 6e5e0f2 | 2021-04-29 23:01:13 -0700 | [diff] [blame] | 5348 | gfp &= gfp_allowed_mask; |
Pavel Tatashin | da6df1b | 2021-05-04 18:38:57 -0700 | [diff] [blame] | 5349 | /* |
| 5350 | * Apply scoped allocation constraints. This is mainly about GFP_NOFS |
| 5351 | * resp. GFP_NOIO which has to be inherited for all allocation requests |
| 5352 | * from a particular context which has been marked by |
Pavel Tatashin | 8e3560d | 2021-05-04 18:39:00 -0700 | [diff] [blame] | 5353 | * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures |
| 5354 | * movable zones are not used during allocation. |
Pavel Tatashin | da6df1b | 2021-05-04 18:38:57 -0700 | [diff] [blame] | 5355 | */ |
| 5356 | gfp = current_gfp_context(gfp); |
Matthew Wilcox (Oracle) | 6e5e0f2 | 2021-04-29 23:01:13 -0700 | [diff] [blame] | 5357 | alloc_gfp = gfp; |
| 5358 | if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, |
Matthew Wilcox (Oracle) | 8e6a930 | 2021-04-29 23:01:10 -0700 | [diff] [blame] | 5359 | &alloc_gfp, &alloc_flags)) |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 5360 | return NULL; |
| 5361 | |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 5362 | /* |
| 5363 | * Forbid the first pass from falling back to types that fragment |
| 5364 | * memory until all local zones are considered. |
| 5365 | */ |
Matthew Wilcox (Oracle) | 6e5e0f2 | 2021-04-29 23:01:13 -0700 | [diff] [blame] | 5366 | alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); |
Mel Gorman | 6bb1545 | 2018-12-28 00:35:41 -0800 | [diff] [blame] | 5367 | |
Mel Gorman | 5117f45 | 2009-06-16 15:31:59 -0700 | [diff] [blame] | 5368 | /* First allocation attempt */ |
Matthew Wilcox (Oracle) | 8e6a930 | 2021-04-29 23:01:10 -0700 | [diff] [blame] | 5369 | page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); |
Mel Gorman | 4fcb097 | 2016-05-19 17:14:01 -0700 | [diff] [blame] | 5370 | if (likely(page)) |
| 5371 | goto out; |
Andrew Morton | 91fbdc0 | 2015-02-11 15:25:04 -0800 | [diff] [blame] | 5372 | |
Pavel Tatashin | da6df1b | 2021-05-04 18:38:57 -0700 | [diff] [blame] | 5373 | alloc_gfp = gfp; |
Mel Gorman | 4fcb097 | 2016-05-19 17:14:01 -0700 | [diff] [blame] | 5374 | ac.spread_dirty_pages = false; |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 5375 | |
Mel Gorman | 4741526 | 2016-05-19 17:14:44 -0700 | [diff] [blame] | 5376 | /* |
| 5377 | * Restore the original nodemask if it was potentially replaced with |
| 5378 | * &cpuset_current_mems_allowed to optimize the fast-path attempt. |
| 5379 | */ |
Mateusz Nosek | 97ce86f | 2020-04-01 21:09:53 -0700 | [diff] [blame] | 5380 | ac.nodemask = nodemask; |
Vlastimil Babka | 16096c2 | 2017-01-24 15:18:35 -0800 | [diff] [blame] | 5381 | |
Matthew Wilcox (Oracle) | 8e6a930 | 2021-04-29 23:01:10 -0700 | [diff] [blame] | 5382 | page = __alloc_pages_slowpath(alloc_gfp, order, &ac); |
Xishi Qiu | 23f086f | 2015-02-11 15:25:07 -0800 | [diff] [blame] | 5383 | |
Mel Gorman | 4fcb097 | 2016-05-19 17:14:01 -0700 | [diff] [blame] | 5384 | out: |
Matthew Wilcox (Oracle) | 6e5e0f2 | 2021-04-29 23:01:13 -0700 | [diff] [blame] | 5385 | if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page && |
| 5386 | unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { |
Vladimir Davydov | c4159a7 | 2016-08-08 23:03:12 +0300 | [diff] [blame] | 5387 | __free_pages(page, order); |
| 5388 | page = NULL; |
Vladimir Davydov | 4949148 | 2016-07-26 15:24:24 -0700 | [diff] [blame] | 5389 | } |
| 5390 | |
Matthew Wilcox (Oracle) | 8e6a930 | 2021-04-29 23:01:10 -0700 | [diff] [blame] | 5391 | trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); |
Mel Gorman | 4fcb097 | 2016-05-19 17:14:01 -0700 | [diff] [blame] | 5392 | |
Mel Gorman | 11e33f6 | 2009-06-16 15:31:57 -0700 | [diff] [blame] | 5393 | return page; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5394 | } |
Matthew Wilcox (Oracle) | 84172f4 | 2021-04-29 23:01:15 -0700 | [diff] [blame] | 5395 | EXPORT_SYMBOL(__alloc_pages); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5396 | |
Matthew Wilcox (Oracle) | cc09cb1 | 2020-12-15 22:55:54 -0500 | [diff] [blame] | 5397 | struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, |
| 5398 | nodemask_t *nodemask) |
| 5399 | { |
| 5400 | struct page *page = __alloc_pages(gfp | __GFP_COMP, order, |
| 5401 | preferred_nid, nodemask); |
| 5402 | |
| 5403 | if (page && order > 1) |
| 5404 | prep_transhuge_page(page); |
| 5405 | return (struct folio *)page; |
| 5406 | } |
| 5407 | EXPORT_SYMBOL(__folio_alloc); |
| 5408 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5409 | /* |
Michal Hocko | 9ea9a68 | 2018-08-17 15:46:01 -0700 | [diff] [blame] | 5410 | * Common helper functions. Never use with __GFP_HIGHMEM because the returned |
| 5411 | * address cannot represent highmem pages. Use alloc_pages and then kmap if |
| 5412 | * you need to access high mem. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5413 | */ |
Harvey Harrison | 920c7a5 | 2008-02-04 22:29:26 -0800 | [diff] [blame] | 5414 | unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5415 | { |
Akinobu Mita | 945a111 | 2009-09-21 17:01:47 -0700 | [diff] [blame] | 5416 | struct page *page; |
| 5417 | |
Michal Hocko | 9ea9a68 | 2018-08-17 15:46:01 -0700 | [diff] [blame] | 5418 | page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5419 | if (!page) |
| 5420 | return 0; |
| 5421 | return (unsigned long) page_address(page); |
| 5422 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5423 | EXPORT_SYMBOL(__get_free_pages); |
| 5424 | |
Harvey Harrison | 920c7a5 | 2008-02-04 22:29:26 -0800 | [diff] [blame] | 5425 | unsigned long get_zeroed_page(gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5426 | { |
Akinobu Mita | 945a111 | 2009-09-21 17:01:47 -0700 | [diff] [blame] | 5427 | return __get_free_pages(gfp_mask | __GFP_ZERO, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5428 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5429 | EXPORT_SYMBOL(get_zeroed_page); |
| 5430 | |
Matthew Wilcox (Oracle) | 7f194fb | 2020-12-14 19:11:09 -0800 | [diff] [blame] | 5431 | /** |
| 5432 | * __free_pages - Free pages allocated with alloc_pages(). |
| 5433 | * @page: The page pointer returned from alloc_pages(). |
| 5434 | * @order: The order of the allocation. |
| 5435 | * |
| 5436 | * This function can free multi-page allocations that are not compound |
| 5437 | * pages. It does not check that the @order passed in matches that of |
| 5438 | * the allocation, so it is easy to leak memory. Freeing more memory |
| 5439 | * than was allocated will probably emit a warning. |
| 5440 | * |
| 5441 | * If the last reference to this page is speculative, it will be released |
| 5442 | * by put_page() which only frees the first page of a non-compound |
| 5443 | * allocation. To prevent the remaining pages from being leaked, we free |
| 5444 | * the subsequent pages here. If you want to use the page's reference |
| 5445 | * count to decide when to free the allocation, you should allocate a |
| 5446 | * compound page, and use put_page() instead of __free_pages(). |
| 5447 | * |
| 5448 | * Context: May be called in interrupt context or while holding a normal |
| 5449 | * spinlock, but not in NMI context or while holding a raw spinlock. |
| 5450 | */ |
Aaron Lu | 742aa7f | 2018-12-28 00:35:22 -0800 | [diff] [blame] | 5451 | void __free_pages(struct page *page, unsigned int order) |
| 5452 | { |
| 5453 | if (put_page_testzero(page)) |
| 5454 | free_the_page(page, order); |
Matthew Wilcox (Oracle) | e320d30 | 2020-10-13 16:56:04 -0700 | [diff] [blame] | 5455 | else if (!PageHead(page)) |
| 5456 | while (order-- > 0) |
| 5457 | free_the_page(page + (1 << order), order); |
Aaron Lu | 742aa7f | 2018-12-28 00:35:22 -0800 | [diff] [blame] | 5458 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5459 | EXPORT_SYMBOL(__free_pages); |
| 5460 | |
Harvey Harrison | 920c7a5 | 2008-02-04 22:29:26 -0800 | [diff] [blame] | 5461 | void free_pages(unsigned long addr, unsigned int order) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5462 | { |
| 5463 | if (addr != 0) { |
Nick Piggin | 725d704 | 2006-09-25 23:30:55 -0700 | [diff] [blame] | 5464 | VM_BUG_ON(!virt_addr_valid((void *)addr)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5465 | __free_pages(virt_to_page((void *)addr), order); |
| 5466 | } |
| 5467 | } |
| 5468 | |
| 5469 | EXPORT_SYMBOL(free_pages); |
| 5470 | |
Glauber Costa | 6a1a0d3 | 2012-12-18 14:22:00 -0800 | [diff] [blame] | 5471 | /* |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 5472 | * Page Fragment: |
| 5473 | * An arbitrary-length arbitrary-offset area of memory which resides |
| 5474 | * within a 0 or higher order page. Multiple fragments within that page |
| 5475 | * are individually refcounted, in the page's reference counter. |
| 5476 | * |
| 5477 | * The page_frag functions below provide a simple allocation framework for |
| 5478 | * page fragments. This is used by the network stack and network device |
| 5479 | * drivers to provide a backing region of memory for use as either an |
| 5480 | * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. |
| 5481 | */ |
Alexander Duyck | 2976db8 | 2017-01-10 16:58:09 -0800 | [diff] [blame] | 5482 | static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, |
| 5483 | gfp_t gfp_mask) |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 5484 | { |
| 5485 | struct page *page = NULL; |
| 5486 | gfp_t gfp = gfp_mask; |
| 5487 | |
| 5488 | #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) |
| 5489 | gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | |
| 5490 | __GFP_NOMEMALLOC; |
| 5491 | page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, |
| 5492 | PAGE_FRAG_CACHE_MAX_ORDER); |
| 5493 | nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; |
| 5494 | #endif |
| 5495 | if (unlikely(!page)) |
| 5496 | page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); |
| 5497 | |
| 5498 | nc->va = page ? page_address(page) : NULL; |
| 5499 | |
| 5500 | return page; |
| 5501 | } |
| 5502 | |
Alexander Duyck | 2976db8 | 2017-01-10 16:58:09 -0800 | [diff] [blame] | 5503 | void __page_frag_cache_drain(struct page *page, unsigned int count) |
Alexander Duyck | 44fdffd | 2016-12-14 15:05:26 -0800 | [diff] [blame] | 5504 | { |
| 5505 | VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); |
| 5506 | |
Aaron Lu | 742aa7f | 2018-12-28 00:35:22 -0800 | [diff] [blame] | 5507 | if (page_ref_sub_and_test(page, count)) |
| 5508 | free_the_page(page, compound_order(page)); |
Alexander Duyck | 44fdffd | 2016-12-14 15:05:26 -0800 | [diff] [blame] | 5509 | } |
Alexander Duyck | 2976db8 | 2017-01-10 16:58:09 -0800 | [diff] [blame] | 5510 | EXPORT_SYMBOL(__page_frag_cache_drain); |
Alexander Duyck | 44fdffd | 2016-12-14 15:05:26 -0800 | [diff] [blame] | 5511 | |
Kevin Hao | b358e21 | 2021-02-04 18:56:35 +0800 | [diff] [blame] | 5512 | void *page_frag_alloc_align(struct page_frag_cache *nc, |
| 5513 | unsigned int fragsz, gfp_t gfp_mask, |
| 5514 | unsigned int align_mask) |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 5515 | { |
| 5516 | unsigned int size = PAGE_SIZE; |
| 5517 | struct page *page; |
| 5518 | int offset; |
| 5519 | |
| 5520 | if (unlikely(!nc->va)) { |
| 5521 | refill: |
Alexander Duyck | 2976db8 | 2017-01-10 16:58:09 -0800 | [diff] [blame] | 5522 | page = __page_frag_cache_refill(nc, gfp_mask); |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 5523 | if (!page) |
| 5524 | return NULL; |
| 5525 | |
| 5526 | #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) |
| 5527 | /* if size can vary use size else just use PAGE_SIZE */ |
| 5528 | size = nc->size; |
| 5529 | #endif |
| 5530 | /* Even if we own the page, we do not use atomic_set(). |
| 5531 | * This would break get_page_unless_zero() users. |
| 5532 | */ |
Alexander Duyck | 8644772 | 2019-02-15 14:44:12 -0800 | [diff] [blame] | 5533 | page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 5534 | |
| 5535 | /* reset page count bias and offset to start of new frag */ |
Michal Hocko | 2f064f3 | 2015-08-21 14:11:51 -0700 | [diff] [blame] | 5536 | nc->pfmemalloc = page_is_pfmemalloc(page); |
Alexander Duyck | 8644772 | 2019-02-15 14:44:12 -0800 | [diff] [blame] | 5537 | nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 5538 | nc->offset = size; |
| 5539 | } |
| 5540 | |
| 5541 | offset = nc->offset - fragsz; |
| 5542 | if (unlikely(offset < 0)) { |
| 5543 | page = virt_to_page(nc->va); |
| 5544 | |
Joonsoo Kim | fe896d1 | 2016-03-17 14:19:26 -0700 | [diff] [blame] | 5545 | if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 5546 | goto refill; |
| 5547 | |
Dongli Zhang | d8c1901 | 2020-11-15 12:10:29 -0800 | [diff] [blame] | 5548 | if (unlikely(nc->pfmemalloc)) { |
| 5549 | free_the_page(page, compound_order(page)); |
| 5550 | goto refill; |
| 5551 | } |
| 5552 | |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 5553 | #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) |
| 5554 | /* if size can vary use size else just use PAGE_SIZE */ |
| 5555 | size = nc->size; |
| 5556 | #endif |
| 5557 | /* OK, page count is 0, we can safely set it */ |
Alexander Duyck | 8644772 | 2019-02-15 14:44:12 -0800 | [diff] [blame] | 5558 | set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 5559 | |
| 5560 | /* reset page count bias and offset to start of new frag */ |
Alexander Duyck | 8644772 | 2019-02-15 14:44:12 -0800 | [diff] [blame] | 5561 | nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 5562 | offset = size - fragsz; |
| 5563 | } |
| 5564 | |
| 5565 | nc->pagecnt_bias--; |
Kevin Hao | b358e21 | 2021-02-04 18:56:35 +0800 | [diff] [blame] | 5566 | offset &= align_mask; |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 5567 | nc->offset = offset; |
| 5568 | |
| 5569 | return nc->va + offset; |
| 5570 | } |
Kevin Hao | b358e21 | 2021-02-04 18:56:35 +0800 | [diff] [blame] | 5571 | EXPORT_SYMBOL(page_frag_alloc_align); |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 5572 | |
| 5573 | /* |
| 5574 | * Frees a page fragment allocated out of either a compound or order 0 page. |
| 5575 | */ |
Alexander Duyck | 8c2dd3e | 2017-01-10 16:58:06 -0800 | [diff] [blame] | 5576 | void page_frag_free(void *addr) |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 5577 | { |
| 5578 | struct page *page = virt_to_head_page(addr); |
| 5579 | |
Aaron Lu | 742aa7f | 2018-12-28 00:35:22 -0800 | [diff] [blame] | 5580 | if (unlikely(put_page_testzero(page))) |
| 5581 | free_the_page(page, compound_order(page)); |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 5582 | } |
Alexander Duyck | 8c2dd3e | 2017-01-10 16:58:06 -0800 | [diff] [blame] | 5583 | EXPORT_SYMBOL(page_frag_free); |
Alexander Duyck | b63ae8c | 2015-05-06 21:11:57 -0700 | [diff] [blame] | 5584 | |
Kirill A. Shutemov | d00181b | 2015-11-06 16:29:57 -0800 | [diff] [blame] | 5585 | static void *make_alloc_exact(unsigned long addr, unsigned int order, |
| 5586 | size_t size) |
Andi Kleen | ee85c2e | 2011-05-11 15:13:34 -0700 | [diff] [blame] | 5587 | { |
| 5588 | if (addr) { |
| 5589 | unsigned long alloc_end = addr + (PAGE_SIZE << order); |
| 5590 | unsigned long used = addr + PAGE_ALIGN(size); |
| 5591 | |
| 5592 | split_page(virt_to_page((void *)addr), order); |
| 5593 | while (used < alloc_end) { |
| 5594 | free_page(used); |
| 5595 | used += PAGE_SIZE; |
| 5596 | } |
| 5597 | } |
| 5598 | return (void *)addr; |
| 5599 | } |
| 5600 | |
Timur Tabi | 2be0ffe | 2008-07-23 21:28:11 -0700 | [diff] [blame] | 5601 | /** |
| 5602 | * alloc_pages_exact - allocate an exact number physically-contiguous pages. |
| 5603 | * @size: the number of bytes to allocate |
Vlastimil Babka | 63931eb | 2019-05-13 17:16:47 -0700 | [diff] [blame] | 5604 | * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP |
Timur Tabi | 2be0ffe | 2008-07-23 21:28:11 -0700 | [diff] [blame] | 5605 | * |
| 5606 | * This function is similar to alloc_pages(), except that it allocates the |
| 5607 | * minimum number of pages to satisfy the request. alloc_pages() can only |
| 5608 | * allocate memory in power-of-two pages. |
| 5609 | * |
| 5610 | * This function is also limited by MAX_ORDER. |
| 5611 | * |
| 5612 | * Memory allocated by this function must be released by free_pages_exact(). |
Mike Rapoport | a862f68 | 2019-03-05 15:48:42 -0800 | [diff] [blame] | 5613 | * |
| 5614 | * Return: pointer to the allocated area or %NULL in case of error. |
Timur Tabi | 2be0ffe | 2008-07-23 21:28:11 -0700 | [diff] [blame] | 5615 | */ |
| 5616 | void *alloc_pages_exact(size_t size, gfp_t gfp_mask) |
| 5617 | { |
| 5618 | unsigned int order = get_order(size); |
| 5619 | unsigned long addr; |
| 5620 | |
Miaohe Lin | ba7f1b9 | 2021-11-05 13:40:15 -0700 | [diff] [blame] | 5621 | if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) |
| 5622 | gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); |
Vlastimil Babka | 63931eb | 2019-05-13 17:16:47 -0700 | [diff] [blame] | 5623 | |
Timur Tabi | 2be0ffe | 2008-07-23 21:28:11 -0700 | [diff] [blame] | 5624 | addr = __get_free_pages(gfp_mask, order); |
Andi Kleen | ee85c2e | 2011-05-11 15:13:34 -0700 | [diff] [blame] | 5625 | return make_alloc_exact(addr, order, size); |
Timur Tabi | 2be0ffe | 2008-07-23 21:28:11 -0700 | [diff] [blame] | 5626 | } |
| 5627 | EXPORT_SYMBOL(alloc_pages_exact); |
| 5628 | |
| 5629 | /** |
Andi Kleen | ee85c2e | 2011-05-11 15:13:34 -0700 | [diff] [blame] | 5630 | * alloc_pages_exact_nid - allocate an exact number of physically-contiguous |
| 5631 | * pages on a node. |
Randy Dunlap | b5e6ab5 | 2011-05-16 13:16:54 -0700 | [diff] [blame] | 5632 | * @nid: the preferred node ID where memory should be allocated |
Andi Kleen | ee85c2e | 2011-05-11 15:13:34 -0700 | [diff] [blame] | 5633 | * @size: the number of bytes to allocate |
Vlastimil Babka | 63931eb | 2019-05-13 17:16:47 -0700 | [diff] [blame] | 5634 | * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP |
Andi Kleen | ee85c2e | 2011-05-11 15:13:34 -0700 | [diff] [blame] | 5635 | * |
| 5636 | * Like alloc_pages_exact(), but try to allocate on node nid first before falling |
| 5637 | * back. |
Mike Rapoport | a862f68 | 2019-03-05 15:48:42 -0800 | [diff] [blame] | 5638 | * |
| 5639 | * Return: pointer to the allocated area or %NULL in case of error. |
Andi Kleen | ee85c2e | 2011-05-11 15:13:34 -0700 | [diff] [blame] | 5640 | */ |
Fabian Frederick | e193181 | 2014-08-06 16:04:59 -0700 | [diff] [blame] | 5641 | void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) |
Andi Kleen | ee85c2e | 2011-05-11 15:13:34 -0700 | [diff] [blame] | 5642 | { |
Kirill A. Shutemov | d00181b | 2015-11-06 16:29:57 -0800 | [diff] [blame] | 5643 | unsigned int order = get_order(size); |
Vlastimil Babka | 63931eb | 2019-05-13 17:16:47 -0700 | [diff] [blame] | 5644 | struct page *p; |
| 5645 | |
Miaohe Lin | ba7f1b9 | 2021-11-05 13:40:15 -0700 | [diff] [blame] | 5646 | if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) |
| 5647 | gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); |
Vlastimil Babka | 63931eb | 2019-05-13 17:16:47 -0700 | [diff] [blame] | 5648 | |
| 5649 | p = alloc_pages_node(nid, gfp_mask, order); |
Andi Kleen | ee85c2e | 2011-05-11 15:13:34 -0700 | [diff] [blame] | 5650 | if (!p) |
| 5651 | return NULL; |
| 5652 | return make_alloc_exact((unsigned long)page_address(p), order, size); |
| 5653 | } |
Andi Kleen | ee85c2e | 2011-05-11 15:13:34 -0700 | [diff] [blame] | 5654 | |
| 5655 | /** |
Timur Tabi | 2be0ffe | 2008-07-23 21:28:11 -0700 | [diff] [blame] | 5656 | * free_pages_exact - release memory allocated via alloc_pages_exact() |
| 5657 | * @virt: the value returned by alloc_pages_exact. |
| 5658 | * @size: size of allocation, same value as passed to alloc_pages_exact(). |
| 5659 | * |
| 5660 | * Release the memory allocated by a previous call to alloc_pages_exact. |
| 5661 | */ |
| 5662 | void free_pages_exact(void *virt, size_t size) |
| 5663 | { |
| 5664 | unsigned long addr = (unsigned long)virt; |
| 5665 | unsigned long end = addr + PAGE_ALIGN(size); |
| 5666 | |
| 5667 | while (addr < end) { |
| 5668 | free_page(addr); |
| 5669 | addr += PAGE_SIZE; |
| 5670 | } |
| 5671 | } |
| 5672 | EXPORT_SYMBOL(free_pages_exact); |
| 5673 | |
Zhang Yanfei | e0fb581 | 2013-02-22 16:35:54 -0800 | [diff] [blame] | 5674 | /** |
| 5675 | * nr_free_zone_pages - count number of pages beyond high watermark |
| 5676 | * @offset: The zone index of the highest zone |
| 5677 | * |
Mike Rapoport | a862f68 | 2019-03-05 15:48:42 -0800 | [diff] [blame] | 5678 | * nr_free_zone_pages() counts the number of pages which are beyond the |
Zhang Yanfei | e0fb581 | 2013-02-22 16:35:54 -0800 | [diff] [blame] | 5679 | * high watermark within all zones at or below a given zone index. For each |
| 5680 | * zone, the number of pages is calculated as: |
mchehab@s-opensource.com | 0e056eb | 2017-03-30 17:11:36 -0300 | [diff] [blame] | 5681 | * |
| 5682 | * nr_free_zone_pages = managed_pages - high_pages |
Mike Rapoport | a862f68 | 2019-03-05 15:48:42 -0800 | [diff] [blame] | 5683 | * |
| 5684 | * Return: number of pages beyond high watermark. |
Zhang Yanfei | e0fb581 | 2013-02-22 16:35:54 -0800 | [diff] [blame] | 5685 | */ |
Zhang Yanfei | ebec386 | 2013-02-22 16:35:43 -0800 | [diff] [blame] | 5686 | static unsigned long nr_free_zone_pages(int offset) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5687 | { |
Mel Gorman | dd1a239 | 2008-04-28 02:12:17 -0700 | [diff] [blame] | 5688 | struct zoneref *z; |
Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 5689 | struct zone *zone; |
| 5690 | |
Martin J. Bligh | e310fd4 | 2005-07-29 22:59:18 -0700 | [diff] [blame] | 5691 | /* Just pick one node, since fallback list is circular */ |
Zhang Yanfei | ebec386 | 2013-02-22 16:35:43 -0800 | [diff] [blame] | 5692 | unsigned long sum = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5693 | |
Mel Gorman | 0e88460 | 2008-04-28 02:12:14 -0700 | [diff] [blame] | 5694 | struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5695 | |
Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 5696 | for_each_zone_zonelist(zone, z, zonelist, offset) { |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 5697 | unsigned long size = zone_managed_pages(zone); |
Mel Gorman | 4185896 | 2009-06-16 15:32:12 -0700 | [diff] [blame] | 5698 | unsigned long high = high_wmark_pages(zone); |
Martin J. Bligh | e310fd4 | 2005-07-29 22:59:18 -0700 | [diff] [blame] | 5699 | if (size > high) |
| 5700 | sum += size - high; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5701 | } |
| 5702 | |
| 5703 | return sum; |
| 5704 | } |
| 5705 | |
Zhang Yanfei | e0fb581 | 2013-02-22 16:35:54 -0800 | [diff] [blame] | 5706 | /** |
| 5707 | * nr_free_buffer_pages - count number of pages beyond high watermark |
| 5708 | * |
| 5709 | * nr_free_buffer_pages() counts the number of pages which are beyond the high |
| 5710 | * watermark within ZONE_DMA and ZONE_NORMAL. |
Mike Rapoport | a862f68 | 2019-03-05 15:48:42 -0800 | [diff] [blame] | 5711 | * |
| 5712 | * Return: number of pages beyond high watermark within ZONE_DMA and |
| 5713 | * ZONE_NORMAL. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5714 | */ |
Zhang Yanfei | ebec386 | 2013-02-22 16:35:43 -0800 | [diff] [blame] | 5715 | unsigned long nr_free_buffer_pages(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5716 | { |
Al Viro | af4ca45 | 2005-10-21 02:55:38 -0400 | [diff] [blame] | 5717 | return nr_free_zone_pages(gfp_zone(GFP_USER)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5718 | } |
Meelap Shah | c2f1a55 | 2007-07-17 04:04:39 -0700 | [diff] [blame] | 5719 | EXPORT_SYMBOL_GPL(nr_free_buffer_pages); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5720 | |
Christoph Lameter | 08e0f6a | 2006-09-27 01:50:06 -0700 | [diff] [blame] | 5721 | static inline void show_node(struct zone *zone) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5722 | { |
Kirill A. Shutemov | e5adfff | 2012-12-11 16:00:29 -0800 | [diff] [blame] | 5723 | if (IS_ENABLED(CONFIG_NUMA)) |
Andy Whitcroft | 25ba77c | 2006-12-06 20:33:03 -0800 | [diff] [blame] | 5724 | printk("Node %d ", zone_to_nid(zone)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5725 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5726 | |
Igor Redko | d02bd27 | 2016-03-17 14:19:05 -0700 | [diff] [blame] | 5727 | long si_mem_available(void) |
| 5728 | { |
| 5729 | long available; |
| 5730 | unsigned long pagecache; |
| 5731 | unsigned long wmark_low = 0; |
| 5732 | unsigned long pages[NR_LRU_LISTS]; |
Vlastimil Babka | b29940c | 2018-10-26 15:05:46 -0700 | [diff] [blame] | 5733 | unsigned long reclaimable; |
Igor Redko | d02bd27 | 2016-03-17 14:19:05 -0700 | [diff] [blame] | 5734 | struct zone *zone; |
| 5735 | int lru; |
| 5736 | |
| 5737 | for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) |
Mel Gorman | 2f95ff9 | 2016-08-11 15:32:57 -0700 | [diff] [blame] | 5738 | pages[lru] = global_node_page_state(NR_LRU_BASE + lru); |
Igor Redko | d02bd27 | 2016-03-17 14:19:05 -0700 | [diff] [blame] | 5739 | |
| 5740 | for_each_zone(zone) |
Mel Gorman | a921444 | 2018-12-28 00:35:44 -0800 | [diff] [blame] | 5741 | wmark_low += low_wmark_pages(zone); |
Igor Redko | d02bd27 | 2016-03-17 14:19:05 -0700 | [diff] [blame] | 5742 | |
| 5743 | /* |
| 5744 | * Estimate the amount of memory available for userspace allocations, |
| 5745 | * without causing swapping. |
| 5746 | */ |
Michal Hocko | c41f012 | 2017-09-06 16:23:36 -0700 | [diff] [blame] | 5747 | available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; |
Igor Redko | d02bd27 | 2016-03-17 14:19:05 -0700 | [diff] [blame] | 5748 | |
| 5749 | /* |
| 5750 | * Not all the page cache can be freed, otherwise the system will |
| 5751 | * start swapping. Assume at least half of the page cache, or the |
| 5752 | * low watermark worth of cache, needs to stay. |
| 5753 | */ |
| 5754 | pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; |
| 5755 | pagecache -= min(pagecache / 2, wmark_low); |
| 5756 | available += pagecache; |
| 5757 | |
| 5758 | /* |
Vlastimil Babka | b29940c | 2018-10-26 15:05:46 -0700 | [diff] [blame] | 5759 | * Part of the reclaimable slab and other kernel memory consists of |
| 5760 | * items that are in use, and cannot be freed. Cap this estimate at the |
| 5761 | * low watermark. |
Igor Redko | d02bd27 | 2016-03-17 14:19:05 -0700 | [diff] [blame] | 5762 | */ |
Roman Gushchin | d42f324 | 2020-08-06 23:20:39 -0700 | [diff] [blame] | 5763 | reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + |
| 5764 | global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); |
Vlastimil Babka | b29940c | 2018-10-26 15:05:46 -0700 | [diff] [blame] | 5765 | available += reclaimable - min(reclaimable / 2, wmark_low); |
Roman Gushchin | 034ebf6 | 2018-04-10 16:27:40 -0700 | [diff] [blame] | 5766 | |
Igor Redko | d02bd27 | 2016-03-17 14:19:05 -0700 | [diff] [blame] | 5767 | if (available < 0) |
| 5768 | available = 0; |
| 5769 | return available; |
| 5770 | } |
| 5771 | EXPORT_SYMBOL_GPL(si_mem_available); |
| 5772 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5773 | void si_meminfo(struct sysinfo *val) |
| 5774 | { |
Arun KS | ca79b0c | 2018-12-28 00:34:29 -0800 | [diff] [blame] | 5775 | val->totalram = totalram_pages(); |
Mel Gorman | 11fb998 | 2016-07-28 15:46:20 -0700 | [diff] [blame] | 5776 | val->sharedram = global_node_page_state(NR_SHMEM); |
Michal Hocko | c41f012 | 2017-09-06 16:23:36 -0700 | [diff] [blame] | 5777 | val->freeram = global_zone_page_state(NR_FREE_PAGES); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5778 | val->bufferram = nr_blockdev_pages(); |
Arun KS | ca79b0c | 2018-12-28 00:34:29 -0800 | [diff] [blame] | 5779 | val->totalhigh = totalhigh_pages(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5780 | val->freehigh = nr_free_highpages(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5781 | val->mem_unit = PAGE_SIZE; |
| 5782 | } |
| 5783 | |
| 5784 | EXPORT_SYMBOL(si_meminfo); |
| 5785 | |
| 5786 | #ifdef CONFIG_NUMA |
| 5787 | void si_meminfo_node(struct sysinfo *val, int nid) |
| 5788 | { |
Jiang Liu | cdd91a7 | 2013-07-03 15:03:27 -0700 | [diff] [blame] | 5789 | int zone_type; /* needs to be signed */ |
| 5790 | unsigned long managed_pages = 0; |
Joonsoo Kim | fc2bd79 | 2016-05-19 17:12:23 -0700 | [diff] [blame] | 5791 | unsigned long managed_highpages = 0; |
| 5792 | unsigned long free_highpages = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5793 | pg_data_t *pgdat = NODE_DATA(nid); |
| 5794 | |
Jiang Liu | cdd91a7 | 2013-07-03 15:03:27 -0700 | [diff] [blame] | 5795 | for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 5796 | managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); |
Jiang Liu | cdd91a7 | 2013-07-03 15:03:27 -0700 | [diff] [blame] | 5797 | val->totalram = managed_pages; |
Mel Gorman | 11fb998 | 2016-07-28 15:46:20 -0700 | [diff] [blame] | 5798 | val->sharedram = node_page_state(pgdat, NR_SHMEM); |
Mel Gorman | 75ef718 | 2016-07-28 15:45:24 -0700 | [diff] [blame] | 5799 | val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); |
Christoph Lameter | 98d2b0e | 2006-09-25 23:31:12 -0700 | [diff] [blame] | 5800 | #ifdef CONFIG_HIGHMEM |
Joonsoo Kim | fc2bd79 | 2016-05-19 17:12:23 -0700 | [diff] [blame] | 5801 | for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { |
| 5802 | struct zone *zone = &pgdat->node_zones[zone_type]; |
| 5803 | |
| 5804 | if (is_highmem(zone)) { |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 5805 | managed_highpages += zone_managed_pages(zone); |
Joonsoo Kim | fc2bd79 | 2016-05-19 17:12:23 -0700 | [diff] [blame] | 5806 | free_highpages += zone_page_state(zone, NR_FREE_PAGES); |
| 5807 | } |
| 5808 | } |
| 5809 | val->totalhigh = managed_highpages; |
| 5810 | val->freehigh = free_highpages; |
Christoph Lameter | 98d2b0e | 2006-09-25 23:31:12 -0700 | [diff] [blame] | 5811 | #else |
Joonsoo Kim | fc2bd79 | 2016-05-19 17:12:23 -0700 | [diff] [blame] | 5812 | val->totalhigh = managed_highpages; |
| 5813 | val->freehigh = free_highpages; |
Christoph Lameter | 98d2b0e | 2006-09-25 23:31:12 -0700 | [diff] [blame] | 5814 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5815 | val->mem_unit = PAGE_SIZE; |
| 5816 | } |
| 5817 | #endif |
| 5818 | |
David Rientjes | ddd588b | 2011-03-22 16:30:46 -0700 | [diff] [blame] | 5819 | /* |
David Rientjes | 7bf02ea | 2011-05-24 17:11:16 -0700 | [diff] [blame] | 5820 | * Determine whether the node should be displayed or not, depending on whether |
| 5821 | * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). |
David Rientjes | ddd588b | 2011-03-22 16:30:46 -0700 | [diff] [blame] | 5822 | */ |
Michal Hocko | 9af744d | 2017-02-22 15:46:16 -0800 | [diff] [blame] | 5823 | static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) |
David Rientjes | ddd588b | 2011-03-22 16:30:46 -0700 | [diff] [blame] | 5824 | { |
David Rientjes | ddd588b | 2011-03-22 16:30:46 -0700 | [diff] [blame] | 5825 | if (!(flags & SHOW_MEM_FILTER_NODES)) |
Michal Hocko | 9af744d | 2017-02-22 15:46:16 -0800 | [diff] [blame] | 5826 | return false; |
David Rientjes | ddd588b | 2011-03-22 16:30:46 -0700 | [diff] [blame] | 5827 | |
Michal Hocko | 9af744d | 2017-02-22 15:46:16 -0800 | [diff] [blame] | 5828 | /* |
| 5829 | * no node mask - aka implicit memory numa policy. Do not bother with |
| 5830 | * the synchronization - read_mems_allowed_begin - because we do not |
| 5831 | * have to be precise here. |
| 5832 | */ |
| 5833 | if (!nodemask) |
| 5834 | nodemask = &cpuset_current_mems_allowed; |
| 5835 | |
| 5836 | return !node_isset(nid, *nodemask); |
David Rientjes | ddd588b | 2011-03-22 16:30:46 -0700 | [diff] [blame] | 5837 | } |
| 5838 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5839 | #define K(x) ((x) << (PAGE_SHIFT-10)) |
| 5840 | |
Rabin Vincent | 377e4f1 | 2012-12-11 16:00:24 -0800 | [diff] [blame] | 5841 | static void show_migration_types(unsigned char type) |
| 5842 | { |
| 5843 | static const char types[MIGRATE_TYPES] = { |
| 5844 | [MIGRATE_UNMOVABLE] = 'U', |
Rabin Vincent | 377e4f1 | 2012-12-11 16:00:24 -0800 | [diff] [blame] | 5845 | [MIGRATE_MOVABLE] = 'M', |
Vlastimil Babka | 475a2f9 | 2015-12-11 13:40:29 -0800 | [diff] [blame] | 5846 | [MIGRATE_RECLAIMABLE] = 'E', |
| 5847 | [MIGRATE_HIGHATOMIC] = 'H', |
Rabin Vincent | 377e4f1 | 2012-12-11 16:00:24 -0800 | [diff] [blame] | 5848 | #ifdef CONFIG_CMA |
| 5849 | [MIGRATE_CMA] = 'C', |
| 5850 | #endif |
Minchan Kim | 194159f | 2013-02-22 16:33:58 -0800 | [diff] [blame] | 5851 | #ifdef CONFIG_MEMORY_ISOLATION |
Rabin Vincent | 377e4f1 | 2012-12-11 16:00:24 -0800 | [diff] [blame] | 5852 | [MIGRATE_ISOLATE] = 'I', |
Minchan Kim | 194159f | 2013-02-22 16:33:58 -0800 | [diff] [blame] | 5853 | #endif |
Rabin Vincent | 377e4f1 | 2012-12-11 16:00:24 -0800 | [diff] [blame] | 5854 | }; |
| 5855 | char tmp[MIGRATE_TYPES + 1]; |
| 5856 | char *p = tmp; |
| 5857 | int i; |
| 5858 | |
| 5859 | for (i = 0; i < MIGRATE_TYPES; i++) { |
| 5860 | if (type & (1 << i)) |
| 5861 | *p++ = types[i]; |
| 5862 | } |
| 5863 | |
| 5864 | *p = '\0'; |
Joe Perches | 1f84a18 | 2016-10-27 17:46:29 -0700 | [diff] [blame] | 5865 | printk(KERN_CONT "(%s) ", tmp); |
Rabin Vincent | 377e4f1 | 2012-12-11 16:00:24 -0800 | [diff] [blame] | 5866 | } |
| 5867 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5868 | /* |
| 5869 | * Show free area list (used inside shift_scroll-lock stuff) |
| 5870 | * We also calculate the percentage fragmentation. We do this by counting the |
| 5871 | * memory on each free list with the exception of the first item on the list. |
Konstantin Khlebnikov | d1bfcdb | 2015-04-14 15:45:30 -0700 | [diff] [blame] | 5872 | * |
| 5873 | * Bits in @filter: |
| 5874 | * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's |
| 5875 | * cpuset. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5876 | */ |
Michal Hocko | 9af744d | 2017-02-22 15:46:16 -0800 | [diff] [blame] | 5877 | void show_free_areas(unsigned int filter, nodemask_t *nodemask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5878 | { |
Konstantin Khlebnikov | d1bfcdb | 2015-04-14 15:45:30 -0700 | [diff] [blame] | 5879 | unsigned long free_pcp = 0; |
Jes Sorensen | c724191 | 2006-09-27 01:50:05 -0700 | [diff] [blame] | 5880 | int cpu; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5881 | struct zone *zone; |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 5882 | pg_data_t *pgdat; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5883 | |
KOSAKI Motohiro | ee99c71 | 2009-03-31 15:19:31 -0700 | [diff] [blame] | 5884 | for_each_populated_zone(zone) { |
Michal Hocko | 9af744d | 2017-02-22 15:46:16 -0800 | [diff] [blame] | 5885 | if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) |
David Rientjes | ddd588b | 2011-03-22 16:30:46 -0700 | [diff] [blame] | 5886 | continue; |
Konstantin Khlebnikov | d1bfcdb | 2015-04-14 15:45:30 -0700 | [diff] [blame] | 5887 | |
Konstantin Khlebnikov | 761b067 | 2015-04-14 15:45:32 -0700 | [diff] [blame] | 5888 | for_each_online_cpu(cpu) |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 5889 | free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5890 | } |
| 5891 | |
KOSAKI Motohiro | a731286 | 2009-09-21 17:01:37 -0700 | [diff] [blame] | 5892 | printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" |
| 5893 | " active_file:%lu inactive_file:%lu isolated_file:%lu\n" |
NeilBrown | 8d92890 | 2020-06-01 21:48:21 -0700 | [diff] [blame] | 5894 | " unevictable:%lu dirty:%lu writeback:%lu\n" |
Konstantin Khlebnikov | d1bfcdb | 2015-04-14 15:45:30 -0700 | [diff] [blame] | 5895 | " slab_reclaimable:%lu slab_unreclaimable:%lu\n" |
Bartlomiej Zolnierkiewicz | d1ce749 | 2012-10-08 16:32:02 -0700 | [diff] [blame] | 5896 | " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" |
liuhailong | eb2169c | 2021-09-02 14:53:01 -0700 | [diff] [blame] | 5897 | " kernel_misc_reclaimable:%lu\n" |
Konstantin Khlebnikov | d1bfcdb | 2015-04-14 15:45:30 -0700 | [diff] [blame] | 5898 | " free:%lu free_pcp:%lu free_cma:%lu\n", |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 5899 | global_node_page_state(NR_ACTIVE_ANON), |
| 5900 | global_node_page_state(NR_INACTIVE_ANON), |
| 5901 | global_node_page_state(NR_ISOLATED_ANON), |
| 5902 | global_node_page_state(NR_ACTIVE_FILE), |
| 5903 | global_node_page_state(NR_INACTIVE_FILE), |
| 5904 | global_node_page_state(NR_ISOLATED_FILE), |
| 5905 | global_node_page_state(NR_UNEVICTABLE), |
Mel Gorman | 11fb998 | 2016-07-28 15:46:20 -0700 | [diff] [blame] | 5906 | global_node_page_state(NR_FILE_DIRTY), |
| 5907 | global_node_page_state(NR_WRITEBACK), |
Roman Gushchin | d42f324 | 2020-08-06 23:20:39 -0700 | [diff] [blame] | 5908 | global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), |
| 5909 | global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), |
Mel Gorman | 50658e2 | 2016-07-28 15:46:14 -0700 | [diff] [blame] | 5910 | global_node_page_state(NR_FILE_MAPPED), |
Mel Gorman | 11fb998 | 2016-07-28 15:46:20 -0700 | [diff] [blame] | 5911 | global_node_page_state(NR_SHMEM), |
Shakeel Butt | f0c0c11 | 2020-12-14 19:07:17 -0800 | [diff] [blame] | 5912 | global_node_page_state(NR_PAGETABLE), |
Michal Hocko | c41f012 | 2017-09-06 16:23:36 -0700 | [diff] [blame] | 5913 | global_zone_page_state(NR_BOUNCE), |
liuhailong | eb2169c | 2021-09-02 14:53:01 -0700 | [diff] [blame] | 5914 | global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE), |
Michal Hocko | c41f012 | 2017-09-06 16:23:36 -0700 | [diff] [blame] | 5915 | global_zone_page_state(NR_FREE_PAGES), |
Konstantin Khlebnikov | d1bfcdb | 2015-04-14 15:45:30 -0700 | [diff] [blame] | 5916 | free_pcp, |
Michal Hocko | c41f012 | 2017-09-06 16:23:36 -0700 | [diff] [blame] | 5917 | global_zone_page_state(NR_FREE_CMA_PAGES)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5918 | |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 5919 | for_each_online_pgdat(pgdat) { |
Michal Hocko | 9af744d | 2017-02-22 15:46:16 -0800 | [diff] [blame] | 5920 | if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) |
Michal Hocko | c02e50b | 2017-02-22 15:46:07 -0800 | [diff] [blame] | 5921 | continue; |
| 5922 | |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 5923 | printk("Node %d" |
| 5924 | " active_anon:%lukB" |
| 5925 | " inactive_anon:%lukB" |
| 5926 | " active_file:%lukB" |
| 5927 | " inactive_file:%lukB" |
| 5928 | " unevictable:%lukB" |
| 5929 | " isolated(anon):%lukB" |
| 5930 | " isolated(file):%lukB" |
Mel Gorman | 50658e2 | 2016-07-28 15:46:14 -0700 | [diff] [blame] | 5931 | " mapped:%lukB" |
Mel Gorman | 11fb998 | 2016-07-28 15:46:20 -0700 | [diff] [blame] | 5932 | " dirty:%lukB" |
| 5933 | " writeback:%lukB" |
| 5934 | " shmem:%lukB" |
| 5935 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 5936 | " shmem_thp: %lukB" |
| 5937 | " shmem_pmdmapped: %lukB" |
| 5938 | " anon_thp: %lukB" |
| 5939 | #endif |
| 5940 | " writeback_tmp:%lukB" |
Shakeel Butt | 991e767 | 2020-08-06 23:21:37 -0700 | [diff] [blame] | 5941 | " kernel_stack:%lukB" |
| 5942 | #ifdef CONFIG_SHADOW_CALL_STACK |
| 5943 | " shadow_call_stack:%lukB" |
| 5944 | #endif |
Shakeel Butt | f0c0c11 | 2020-12-14 19:07:17 -0800 | [diff] [blame] | 5945 | " pagetables:%lukB" |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 5946 | " all_unreclaimable? %s" |
| 5947 | "\n", |
| 5948 | pgdat->node_id, |
| 5949 | K(node_page_state(pgdat, NR_ACTIVE_ANON)), |
| 5950 | K(node_page_state(pgdat, NR_INACTIVE_ANON)), |
| 5951 | K(node_page_state(pgdat, NR_ACTIVE_FILE)), |
| 5952 | K(node_page_state(pgdat, NR_INACTIVE_FILE)), |
| 5953 | K(node_page_state(pgdat, NR_UNEVICTABLE)), |
| 5954 | K(node_page_state(pgdat, NR_ISOLATED_ANON)), |
| 5955 | K(node_page_state(pgdat, NR_ISOLATED_FILE)), |
Mel Gorman | 50658e2 | 2016-07-28 15:46:14 -0700 | [diff] [blame] | 5956 | K(node_page_state(pgdat, NR_FILE_MAPPED)), |
Mel Gorman | 11fb998 | 2016-07-28 15:46:20 -0700 | [diff] [blame] | 5957 | K(node_page_state(pgdat, NR_FILE_DIRTY)), |
| 5958 | K(node_page_state(pgdat, NR_WRITEBACK)), |
Alexander Polakov | 1f06b81 | 2017-04-07 16:04:45 -0700 | [diff] [blame] | 5959 | K(node_page_state(pgdat, NR_SHMEM)), |
Mel Gorman | 11fb998 | 2016-07-28 15:46:20 -0700 | [diff] [blame] | 5960 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
Muchun Song | 57b2847 | 2021-02-24 12:03:31 -0800 | [diff] [blame] | 5961 | K(node_page_state(pgdat, NR_SHMEM_THPS)), |
Muchun Song | a1528e2 | 2021-02-24 12:03:35 -0800 | [diff] [blame] | 5962 | K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)), |
Muchun Song | 69473e5 | 2021-02-24 12:03:23 -0800 | [diff] [blame] | 5963 | K(node_page_state(pgdat, NR_ANON_THPS)), |
Mel Gorman | 11fb998 | 2016-07-28 15:46:20 -0700 | [diff] [blame] | 5964 | #endif |
Mel Gorman | 11fb998 | 2016-07-28 15:46:20 -0700 | [diff] [blame] | 5965 | K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), |
Shakeel Butt | 991e767 | 2020-08-06 23:21:37 -0700 | [diff] [blame] | 5966 | node_page_state(pgdat, NR_KERNEL_STACK_KB), |
| 5967 | #ifdef CONFIG_SHADOW_CALL_STACK |
| 5968 | node_page_state(pgdat, NR_KERNEL_SCS_KB), |
| 5969 | #endif |
Shakeel Butt | f0c0c11 | 2020-12-14 19:07:17 -0800 | [diff] [blame] | 5970 | K(node_page_state(pgdat, NR_PAGETABLE)), |
Johannes Weiner | c73322d | 2017-05-03 14:51:51 -0700 | [diff] [blame] | 5971 | pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? |
| 5972 | "yes" : "no"); |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 5973 | } |
| 5974 | |
KOSAKI Motohiro | ee99c71 | 2009-03-31 15:19:31 -0700 | [diff] [blame] | 5975 | for_each_populated_zone(zone) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5976 | int i; |
| 5977 | |
Michal Hocko | 9af744d | 2017-02-22 15:46:16 -0800 | [diff] [blame] | 5978 | if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) |
David Rientjes | ddd588b | 2011-03-22 16:30:46 -0700 | [diff] [blame] | 5979 | continue; |
Konstantin Khlebnikov | d1bfcdb | 2015-04-14 15:45:30 -0700 | [diff] [blame] | 5980 | |
| 5981 | free_pcp = 0; |
| 5982 | for_each_online_cpu(cpu) |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 5983 | free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; |
Konstantin Khlebnikov | d1bfcdb | 2015-04-14 15:45:30 -0700 | [diff] [blame] | 5984 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5985 | show_node(zone); |
Joe Perches | 1f84a18 | 2016-10-27 17:46:29 -0700 | [diff] [blame] | 5986 | printk(KERN_CONT |
| 5987 | "%s" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5988 | " free:%lukB" |
Liangcai Fan | a6ea8b5 | 2021-11-05 13:40:37 -0700 | [diff] [blame] | 5989 | " boost:%lukB" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5990 | " min:%lukB" |
| 5991 | " low:%lukB" |
| 5992 | " high:%lukB" |
lijiazi | e47b346 | 2019-11-30 17:55:21 -0800 | [diff] [blame] | 5993 | " reserved_highatomic:%luKB" |
Minchan Kim | 71c799f | 2016-07-28 15:47:26 -0700 | [diff] [blame] | 5994 | " active_anon:%lukB" |
| 5995 | " inactive_anon:%lukB" |
| 5996 | " active_file:%lukB" |
| 5997 | " inactive_file:%lukB" |
| 5998 | " unevictable:%lukB" |
Mel Gorman | 5a1c84b | 2016-07-28 15:47:31 -0700 | [diff] [blame] | 5999 | " writepending:%lukB" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6000 | " present:%lukB" |
Jiang Liu | 9feedc9 | 2012-12-12 13:52:12 -0800 | [diff] [blame] | 6001 | " managed:%lukB" |
KOSAKI Motohiro | 4a0aa73 | 2009-09-21 17:01:30 -0700 | [diff] [blame] | 6002 | " mlocked:%lukB" |
KOSAKI Motohiro | 4a0aa73 | 2009-09-21 17:01:30 -0700 | [diff] [blame] | 6003 | " bounce:%lukB" |
Konstantin Khlebnikov | d1bfcdb | 2015-04-14 15:45:30 -0700 | [diff] [blame] | 6004 | " free_pcp:%lukB" |
| 6005 | " local_pcp:%ukB" |
Bartlomiej Zolnierkiewicz | d1ce749 | 2012-10-08 16:32:02 -0700 | [diff] [blame] | 6006 | " free_cma:%lukB" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6007 | "\n", |
| 6008 | zone->name, |
Mel Gorman | 88f5acf | 2011-01-13 15:45:41 -0800 | [diff] [blame] | 6009 | K(zone_page_state(zone, NR_FREE_PAGES)), |
Liangcai Fan | a6ea8b5 | 2021-11-05 13:40:37 -0700 | [diff] [blame] | 6010 | K(zone->watermark_boost), |
Mel Gorman | 4185896 | 2009-06-16 15:32:12 -0700 | [diff] [blame] | 6011 | K(min_wmark_pages(zone)), |
| 6012 | K(low_wmark_pages(zone)), |
| 6013 | K(high_wmark_pages(zone)), |
lijiazi | e47b346 | 2019-11-30 17:55:21 -0800 | [diff] [blame] | 6014 | K(zone->nr_reserved_highatomic), |
Minchan Kim | 71c799f | 2016-07-28 15:47:26 -0700 | [diff] [blame] | 6015 | K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), |
| 6016 | K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), |
| 6017 | K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), |
| 6018 | K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), |
| 6019 | K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), |
Mel Gorman | 5a1c84b | 2016-07-28 15:47:31 -0700 | [diff] [blame] | 6020 | K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6021 | K(zone->present_pages), |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 6022 | K(zone_managed_pages(zone)), |
KOSAKI Motohiro | 4a0aa73 | 2009-09-21 17:01:30 -0700 | [diff] [blame] | 6023 | K(zone_page_state(zone, NR_MLOCK)), |
KOSAKI Motohiro | 4a0aa73 | 2009-09-21 17:01:30 -0700 | [diff] [blame] | 6024 | K(zone_page_state(zone, NR_BOUNCE)), |
Konstantin Khlebnikov | d1bfcdb | 2015-04-14 15:45:30 -0700 | [diff] [blame] | 6025 | K(free_pcp), |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 6026 | K(this_cpu_read(zone->per_cpu_pageset->count)), |
Minchan Kim | 33e077b | 2016-07-28 15:47:14 -0700 | [diff] [blame] | 6027 | K(zone_page_state(zone, NR_FREE_CMA_PAGES))); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6028 | printk("lowmem_reserve[]:"); |
| 6029 | for (i = 0; i < MAX_NR_ZONES; i++) |
Joe Perches | 1f84a18 | 2016-10-27 17:46:29 -0700 | [diff] [blame] | 6030 | printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); |
| 6031 | printk(KERN_CONT "\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6032 | } |
| 6033 | |
KOSAKI Motohiro | ee99c71 | 2009-03-31 15:19:31 -0700 | [diff] [blame] | 6034 | for_each_populated_zone(zone) { |
Kirill A. Shutemov | d00181b | 2015-11-06 16:29:57 -0800 | [diff] [blame] | 6035 | unsigned int order; |
| 6036 | unsigned long nr[MAX_ORDER], flags, total = 0; |
Rabin Vincent | 377e4f1 | 2012-12-11 16:00:24 -0800 | [diff] [blame] | 6037 | unsigned char types[MAX_ORDER]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6038 | |
Michal Hocko | 9af744d | 2017-02-22 15:46:16 -0800 | [diff] [blame] | 6039 | if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) |
David Rientjes | ddd588b | 2011-03-22 16:30:46 -0700 | [diff] [blame] | 6040 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6041 | show_node(zone); |
Joe Perches | 1f84a18 | 2016-10-27 17:46:29 -0700 | [diff] [blame] | 6042 | printk(KERN_CONT "%s: ", zone->name); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6043 | |
| 6044 | spin_lock_irqsave(&zone->lock, flags); |
| 6045 | for (order = 0; order < MAX_ORDER; order++) { |
Rabin Vincent | 377e4f1 | 2012-12-11 16:00:24 -0800 | [diff] [blame] | 6046 | struct free_area *area = &zone->free_area[order]; |
| 6047 | int type; |
| 6048 | |
| 6049 | nr[order] = area->nr_free; |
Kirill Korotaev | 8f9de51 | 2006-06-23 02:03:50 -0700 | [diff] [blame] | 6050 | total += nr[order] << order; |
Rabin Vincent | 377e4f1 | 2012-12-11 16:00:24 -0800 | [diff] [blame] | 6051 | |
| 6052 | types[order] = 0; |
| 6053 | for (type = 0; type < MIGRATE_TYPES; type++) { |
Dan Williams | b03641a | 2019-05-14 15:41:32 -0700 | [diff] [blame] | 6054 | if (!free_area_empty(area, type)) |
Rabin Vincent | 377e4f1 | 2012-12-11 16:00:24 -0800 | [diff] [blame] | 6055 | types[order] |= 1 << type; |
| 6056 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6057 | } |
| 6058 | spin_unlock_irqrestore(&zone->lock, flags); |
Rabin Vincent | 377e4f1 | 2012-12-11 16:00:24 -0800 | [diff] [blame] | 6059 | for (order = 0; order < MAX_ORDER; order++) { |
Joe Perches | 1f84a18 | 2016-10-27 17:46:29 -0700 | [diff] [blame] | 6060 | printk(KERN_CONT "%lu*%lukB ", |
| 6061 | nr[order], K(1UL) << order); |
Rabin Vincent | 377e4f1 | 2012-12-11 16:00:24 -0800 | [diff] [blame] | 6062 | if (nr[order]) |
| 6063 | show_migration_types(types[order]); |
| 6064 | } |
Joe Perches | 1f84a18 | 2016-10-27 17:46:29 -0700 | [diff] [blame] | 6065 | printk(KERN_CONT "= %lukB\n", K(total)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6066 | } |
| 6067 | |
David Rientjes | 949f7ec | 2013-04-29 15:07:48 -0700 | [diff] [blame] | 6068 | hugetlb_show_meminfo(); |
| 6069 | |
Mel Gorman | 11fb998 | 2016-07-28 15:46:20 -0700 | [diff] [blame] | 6070 | printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); |
Larry Woodman | e6f3602 | 2008-02-04 22:29:30 -0800 | [diff] [blame] | 6071 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6072 | show_swap_cache_info(); |
| 6073 | } |
| 6074 | |
Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 6075 | static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) |
| 6076 | { |
| 6077 | zoneref->zone = zone; |
| 6078 | zoneref->zone_idx = zone_idx(zone); |
| 6079 | } |
| 6080 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6081 | /* |
| 6082 | * Builds allocation fallback zone lists. |
Christoph Lameter | 1a93205 | 2006-01-06 00:11:16 -0800 | [diff] [blame] | 6083 | * |
| 6084 | * Add all populated zones of a node to the zonelist. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6085 | */ |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6086 | static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6087 | { |
Christoph Lameter | 1a93205 | 2006-01-06 00:11:16 -0800 | [diff] [blame] | 6088 | struct zone *zone; |
Zhang Yanfei | bc732f1 | 2013-07-08 16:00:06 -0700 | [diff] [blame] | 6089 | enum zone_type zone_type = MAX_NR_ZONES; |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6090 | int nr_zones = 0; |
Christoph Lameter | 02a68a5 | 2006-01-06 00:11:18 -0800 | [diff] [blame] | 6091 | |
| 6092 | do { |
Christoph Lameter | 2f6726e | 2006-09-25 23:31:18 -0700 | [diff] [blame] | 6093 | zone_type--; |
Christoph Lameter | 070f803 | 2006-01-06 00:11:19 -0800 | [diff] [blame] | 6094 | zone = pgdat->node_zones + zone_type; |
Mel Gorman | 6aa303d | 2016-09-01 16:14:55 -0700 | [diff] [blame] | 6095 | if (managed_zone(zone)) { |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6096 | zoneref_set_zone(zone, &zonerefs[nr_zones++]); |
Christoph Lameter | 070f803 | 2006-01-06 00:11:19 -0800 | [diff] [blame] | 6097 | check_highest_zone(zone_type); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6098 | } |
Christoph Lameter | 2f6726e | 2006-09-25 23:31:18 -0700 | [diff] [blame] | 6099 | } while (zone_type); |
Zhang Yanfei | bc732f1 | 2013-07-08 16:00:06 -0700 | [diff] [blame] | 6100 | |
Christoph Lameter | 070f803 | 2006-01-06 00:11:19 -0800 | [diff] [blame] | 6101 | return nr_zones; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6102 | } |
| 6103 | |
| 6104 | #ifdef CONFIG_NUMA |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6105 | |
| 6106 | static int __parse_numa_zonelist_order(char *s) |
| 6107 | { |
Michal Hocko | c9bff3e | 2017-09-06 16:20:13 -0700 | [diff] [blame] | 6108 | /* |
Ingo Molnar | f0953a1 | 2021-05-06 18:06:47 -0700 | [diff] [blame] | 6109 | * We used to support different zonelists modes but they turned |
Michal Hocko | c9bff3e | 2017-09-06 16:20:13 -0700 | [diff] [blame] | 6110 | * out to be just not useful. Let's keep the warning in place |
| 6111 | * if somebody still use the cmd line parameter so that we do |
| 6112 | * not fail it silently |
| 6113 | */ |
| 6114 | if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { |
| 6115 | pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6116 | return -EINVAL; |
| 6117 | } |
| 6118 | return 0; |
| 6119 | } |
| 6120 | |
Michal Hocko | c9bff3e | 2017-09-06 16:20:13 -0700 | [diff] [blame] | 6121 | char numa_zonelist_order[] = "Node"; |
| 6122 | |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6123 | /* |
| 6124 | * sysctl handler for numa_zonelist_order |
| 6125 | */ |
Joe Perches | cccad5b | 2014-06-06 14:38:09 -0700 | [diff] [blame] | 6126 | int numa_zonelist_order_handler(struct ctl_table *table, int write, |
Christoph Hellwig | 3292739 | 2020-04-24 08:43:38 +0200 | [diff] [blame] | 6127 | void *buffer, size_t *length, loff_t *ppos) |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6128 | { |
Christoph Hellwig | 3292739 | 2020-04-24 08:43:38 +0200 | [diff] [blame] | 6129 | if (write) |
| 6130 | return __parse_numa_zonelist_order(buffer); |
| 6131 | return proc_dostring(table, write, buffer, length, ppos); |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6132 | } |
| 6133 | |
| 6134 | |
Christoph Lameter | 62bc62a | 2009-06-16 15:32:15 -0700 | [diff] [blame] | 6135 | #define MAX_NODE_LOAD (nr_online_nodes) |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6136 | static int node_load[MAX_NUMNODES]; |
| 6137 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6138 | /** |
Pavel Pisa | 4dc3b16 | 2005-05-01 08:59:25 -0700 | [diff] [blame] | 6139 | * find_next_best_node - find the next node that should appear in a given node's fallback list |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6140 | * @node: node whose fallback list we're appending |
| 6141 | * @used_node_mask: nodemask_t of already used nodes |
| 6142 | * |
| 6143 | * We use a number of factors to determine which is the next node that should |
| 6144 | * appear on a given node's fallback list. The node should not have appeared |
| 6145 | * already in @node's fallback list, and it should be the next closest node |
| 6146 | * according to the distance array (which contains arbitrary distance values |
| 6147 | * from each node to each node in the system), and should also prefer nodes |
| 6148 | * with no CPUs, since presumably they'll have very little allocation pressure |
| 6149 | * on them otherwise. |
Mike Rapoport | a862f68 | 2019-03-05 15:48:42 -0800 | [diff] [blame] | 6150 | * |
| 6151 | * Return: node id of the found node or %NUMA_NO_NODE if no node is found. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6152 | */ |
Dave Hansen | 79c28a4 | 2021-09-02 14:59:06 -0700 | [diff] [blame] | 6153 | int find_next_best_node(int node, nodemask_t *used_node_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6154 | { |
Linus Torvalds | 4cf808eb | 2006-02-17 20:38:21 +0100 | [diff] [blame] | 6155 | int n, val; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6156 | int min_val = INT_MAX; |
David Rientjes | 00ef2d2 | 2013-02-22 16:35:36 -0800 | [diff] [blame] | 6157 | int best_node = NUMA_NO_NODE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6158 | |
Linus Torvalds | 4cf808eb | 2006-02-17 20:38:21 +0100 | [diff] [blame] | 6159 | /* Use the local node if we haven't already */ |
| 6160 | if (!node_isset(node, *used_node_mask)) { |
| 6161 | node_set(node, *used_node_mask); |
| 6162 | return node; |
| 6163 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6164 | |
Lai Jiangshan | 4b0ef1fe | 2012-12-12 13:51:46 -0800 | [diff] [blame] | 6165 | for_each_node_state(n, N_MEMORY) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6166 | |
| 6167 | /* Don't want a node to appear more than once */ |
| 6168 | if (node_isset(n, *used_node_mask)) |
| 6169 | continue; |
| 6170 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6171 | /* Use the distance array to find the distance */ |
| 6172 | val = node_distance(node, n); |
| 6173 | |
Linus Torvalds | 4cf808eb | 2006-02-17 20:38:21 +0100 | [diff] [blame] | 6174 | /* Penalize nodes under us ("prefer the next node") */ |
| 6175 | val += (n < node); |
| 6176 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6177 | /* Give preference to headless and unused nodes */ |
Mateusz Nosek | b630749 | 2020-10-13 16:55:42 -0700 | [diff] [blame] | 6178 | if (!cpumask_empty(cpumask_of_node(n))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6179 | val += PENALTY_FOR_NODE_WITH_CPUS; |
| 6180 | |
| 6181 | /* Slight preference for less loaded node */ |
| 6182 | val *= (MAX_NODE_LOAD*MAX_NUMNODES); |
| 6183 | val += node_load[n]; |
| 6184 | |
| 6185 | if (val < min_val) { |
| 6186 | min_val = val; |
| 6187 | best_node = n; |
| 6188 | } |
| 6189 | } |
| 6190 | |
| 6191 | if (best_node >= 0) |
| 6192 | node_set(best_node, *used_node_mask); |
| 6193 | |
| 6194 | return best_node; |
| 6195 | } |
| 6196 | |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6197 | |
| 6198 | /* |
| 6199 | * Build zonelists ordered by node and zones within node. |
| 6200 | * This results in maximum locality--normal zone overflows into local |
| 6201 | * DMA zone, if any--but risks exhausting DMA zone. |
| 6202 | */ |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6203 | static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, |
| 6204 | unsigned nr_nodes) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6205 | { |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6206 | struct zoneref *zonerefs; |
| 6207 | int i; |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6208 | |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6209 | zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; |
| 6210 | |
| 6211 | for (i = 0; i < nr_nodes; i++) { |
| 6212 | int nr_zones; |
| 6213 | |
| 6214 | pg_data_t *node = NODE_DATA(node_order[i]); |
| 6215 | |
| 6216 | nr_zones = build_zonerefs_node(node, zonerefs); |
| 6217 | zonerefs += nr_zones; |
| 6218 | } |
| 6219 | zonerefs->zone = NULL; |
| 6220 | zonerefs->zone_idx = 0; |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6221 | } |
| 6222 | |
| 6223 | /* |
Christoph Lameter | 523b945 | 2007-10-16 01:25:37 -0700 | [diff] [blame] | 6224 | * Build gfp_thisnode zonelists |
| 6225 | */ |
| 6226 | static void build_thisnode_zonelists(pg_data_t *pgdat) |
| 6227 | { |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6228 | struct zoneref *zonerefs; |
| 6229 | int nr_zones; |
Christoph Lameter | 523b945 | 2007-10-16 01:25:37 -0700 | [diff] [blame] | 6230 | |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6231 | zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; |
| 6232 | nr_zones = build_zonerefs_node(pgdat, zonerefs); |
| 6233 | zonerefs += nr_zones; |
| 6234 | zonerefs->zone = NULL; |
| 6235 | zonerefs->zone_idx = 0; |
Christoph Lameter | 523b945 | 2007-10-16 01:25:37 -0700 | [diff] [blame] | 6236 | } |
| 6237 | |
| 6238 | /* |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6239 | * Build zonelists ordered by zone and nodes within zones. |
| 6240 | * This results in conserving DMA zone[s] until all Normal memory is |
| 6241 | * exhausted, but results in overflowing to remote node while memory |
| 6242 | * may still exist in local DMA zone. |
| 6243 | */ |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6244 | |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6245 | static void build_zonelists(pg_data_t *pgdat) |
| 6246 | { |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6247 | static int node_order[MAX_NUMNODES]; |
| 6248 | int node, load, nr_nodes = 0; |
Wei Yang | d0ddf49 | 2020-06-03 15:59:05 -0700 | [diff] [blame] | 6249 | nodemask_t used_mask = NODE_MASK_NONE; |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6250 | int local_node, prev_node; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6251 | |
| 6252 | /* NUMA-aware ordering of nodes */ |
| 6253 | local_node = pgdat->node_id; |
Christoph Lameter | 62bc62a | 2009-06-16 15:32:15 -0700 | [diff] [blame] | 6254 | load = nr_online_nodes; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6255 | prev_node = local_node; |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6256 | |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6257 | memset(node_order, 0, sizeof(node_order)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6258 | while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { |
| 6259 | /* |
| 6260 | * We don't want to pressure a particular node. |
| 6261 | * So adding penalty to the first node in same |
| 6262 | * distance group to make it round-robin. |
| 6263 | */ |
David Rientjes | 957f822 | 2012-10-08 16:33:24 -0700 | [diff] [blame] | 6264 | if (node_distance(local_node, node) != |
| 6265 | node_distance(local_node, prev_node)) |
Krupa Ramakrishnan | 54d032c | 2021-11-05 13:40:21 -0700 | [diff] [blame] | 6266 | node_load[node] += load; |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6267 | |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6268 | node_order[nr_nodes++] = node; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6269 | prev_node = node; |
| 6270 | load--; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6271 | } |
Christoph Lameter | 523b945 | 2007-10-16 01:25:37 -0700 | [diff] [blame] | 6272 | |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6273 | build_zonelists_in_node_order(pgdat, node_order, nr_nodes); |
Christoph Lameter | 523b945 | 2007-10-16 01:25:37 -0700 | [diff] [blame] | 6274 | build_thisnode_zonelists(pgdat); |
Bharata B Rao | 6cf2539 | 2021-11-05 13:40:18 -0700 | [diff] [blame] | 6275 | pr_info("Fallback order for Node %d: ", local_node); |
| 6276 | for (node = 0; node < nr_nodes; node++) |
| 6277 | pr_cont("%d ", node_order[node]); |
| 6278 | pr_cont("\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6279 | } |
| 6280 | |
Lee Schermerhorn | 7aac789 | 2010-05-26 14:45:00 -0700 | [diff] [blame] | 6281 | #ifdef CONFIG_HAVE_MEMORYLESS_NODES |
| 6282 | /* |
| 6283 | * Return node id of node used for "local" allocations. |
| 6284 | * I.e., first node id of first zone in arg node's generic zonelist. |
| 6285 | * Used for initializing percpu 'numa_mem', which is used primarily |
| 6286 | * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. |
| 6287 | */ |
| 6288 | int local_memory_node(int node) |
| 6289 | { |
Mel Gorman | c33d6c0 | 2016-05-19 17:14:10 -0700 | [diff] [blame] | 6290 | struct zoneref *z; |
Lee Schermerhorn | 7aac789 | 2010-05-26 14:45:00 -0700 | [diff] [blame] | 6291 | |
Mel Gorman | c33d6c0 | 2016-05-19 17:14:10 -0700 | [diff] [blame] | 6292 | z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), |
Lee Schermerhorn | 7aac789 | 2010-05-26 14:45:00 -0700 | [diff] [blame] | 6293 | gfp_zone(GFP_KERNEL), |
Mel Gorman | c33d6c0 | 2016-05-19 17:14:10 -0700 | [diff] [blame] | 6294 | NULL); |
Pavel Tatashin | c1093b7 | 2018-08-21 21:53:32 -0700 | [diff] [blame] | 6295 | return zone_to_nid(z->zone); |
Lee Schermerhorn | 7aac789 | 2010-05-26 14:45:00 -0700 | [diff] [blame] | 6296 | } |
| 6297 | #endif |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6298 | |
Joonsoo Kim | 6423aa8 | 2016-08-10 16:27:49 -0700 | [diff] [blame] | 6299 | static void setup_min_unmapped_ratio(void); |
| 6300 | static void setup_min_slab_ratio(void); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6301 | #else /* CONFIG_NUMA */ |
| 6302 | |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6303 | static void build_zonelists(pg_data_t *pgdat) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6304 | { |
Christoph Lameter | 19655d3 | 2006-09-25 23:31:19 -0700 | [diff] [blame] | 6305 | int node, local_node; |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6306 | struct zoneref *zonerefs; |
| 6307 | int nr_zones; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6308 | |
| 6309 | local_node = pgdat->node_id; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6310 | |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6311 | zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; |
| 6312 | nr_zones = build_zonerefs_node(pgdat, zonerefs); |
| 6313 | zonerefs += nr_zones; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6314 | |
Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 6315 | /* |
| 6316 | * Now we build the zonelist so that it contains the zones |
| 6317 | * of all the other nodes. |
| 6318 | * We don't want to pressure a particular node, so when |
| 6319 | * building the zones for node N, we make sure that the |
| 6320 | * zones coming right after the local ones are those from |
| 6321 | * node N+1 (modulo N) |
| 6322 | */ |
| 6323 | for (node = local_node + 1; node < MAX_NUMNODES; node++) { |
| 6324 | if (!node_online(node)) |
| 6325 | continue; |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6326 | nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); |
| 6327 | zonerefs += nr_zones; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6328 | } |
Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 6329 | for (node = 0; node < local_node; node++) { |
| 6330 | if (!node_online(node)) |
| 6331 | continue; |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6332 | nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); |
| 6333 | zonerefs += nr_zones; |
Mel Gorman | 54a6eb5 | 2008-04-28 02:12:16 -0700 | [diff] [blame] | 6334 | } |
| 6335 | |
Michal Hocko | 9d3be21 | 2017-09-06 16:20:30 -0700 | [diff] [blame] | 6336 | zonerefs->zone = NULL; |
| 6337 | zonerefs->zone_idx = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6338 | } |
| 6339 | |
| 6340 | #endif /* CONFIG_NUMA */ |
| 6341 | |
Christoph Lameter | 99dcc3e | 2010-01-05 15:34:51 +0900 | [diff] [blame] | 6342 | /* |
| 6343 | * Boot pageset table. One per cpu which is going to be used for all |
| 6344 | * zones and all nodes. The parameters will be set in such a way |
| 6345 | * that an item put on a list will immediately be handed over to |
| 6346 | * the buddy list. This is safe since pageset manipulation is done |
| 6347 | * with interrupts disabled. |
| 6348 | * |
| 6349 | * The boot_pagesets must be kept even after bootup is complete for |
| 6350 | * unused processors and/or zones. They do play a role for bootstrapping |
| 6351 | * hotplugged processors. |
| 6352 | * |
| 6353 | * zoneinfo_show() and maybe other functions do |
| 6354 | * not check if the processor is online before following the pageset pointer. |
| 6355 | * Other parts of the kernel may not check if the zone is available. |
| 6356 | */ |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 6357 | static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); |
Vlastimil Babka | 952eaf8 | 2020-12-14 19:10:53 -0800 | [diff] [blame] | 6358 | /* These effectively disable the pcplists in the boot pageset completely */ |
| 6359 | #define BOOT_PAGESET_HIGH 0 |
| 6360 | #define BOOT_PAGESET_BATCH 1 |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 6361 | static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); |
| 6362 | static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); |
Johannes Weiner | 385386c | 2017-07-06 15:40:43 -0700 | [diff] [blame] | 6363 | static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats); |
Christoph Lameter | 99dcc3e | 2010-01-05 15:34:51 +0900 | [diff] [blame] | 6364 | |
Michal Hocko | 11cd863 | 2017-09-06 16:20:34 -0700 | [diff] [blame] | 6365 | static void __build_all_zonelists(void *data) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6366 | { |
Yasunori Goto | 6811378 | 2006-06-23 02:03:11 -0700 | [diff] [blame] | 6367 | int nid; |
Michal Hocko | afb6ebb | 2017-09-06 16:20:17 -0700 | [diff] [blame] | 6368 | int __maybe_unused cpu; |
Jiang Liu | 9adb62a | 2012-07-31 16:43:28 -0700 | [diff] [blame] | 6369 | pg_data_t *self = data; |
Michal Hocko | b93e0f3 | 2017-09-06 16:20:37 -0700 | [diff] [blame] | 6370 | static DEFINE_SPINLOCK(lock); |
| 6371 | |
| 6372 | spin_lock(&lock); |
Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 6373 | |
Bo Liu | 7f9cfb3 | 2009-08-18 14:11:19 -0700 | [diff] [blame] | 6374 | #ifdef CONFIG_NUMA |
| 6375 | memset(node_load, 0, sizeof(node_load)); |
| 6376 | #endif |
Jiang Liu | 9adb62a | 2012-07-31 16:43:28 -0700 | [diff] [blame] | 6377 | |
Wei Yang | c115258 | 2017-09-06 16:19:33 -0700 | [diff] [blame] | 6378 | /* |
| 6379 | * This node is hotadded and no memory is yet present. So just |
| 6380 | * building zonelists is fine - no need to touch other nodes. |
| 6381 | */ |
Jiang Liu | 9adb62a | 2012-07-31 16:43:28 -0700 | [diff] [blame] | 6382 | if (self && !node_online(self->node_id)) { |
| 6383 | build_zonelists(self); |
Wei Yang | c115258 | 2017-09-06 16:19:33 -0700 | [diff] [blame] | 6384 | } else { |
| 6385 | for_each_online_node(nid) { |
| 6386 | pg_data_t *pgdat = NODE_DATA(nid); |
Jiang Liu | 9adb62a | 2012-07-31 16:43:28 -0700 | [diff] [blame] | 6387 | |
Wei Yang | c115258 | 2017-09-06 16:19:33 -0700 | [diff] [blame] | 6388 | build_zonelists(pgdat); |
| 6389 | } |
Christoph Lameter | 99dcc3e | 2010-01-05 15:34:51 +0900 | [diff] [blame] | 6390 | |
Michal Hocko | afb6ebb | 2017-09-06 16:20:17 -0700 | [diff] [blame] | 6391 | #ifdef CONFIG_HAVE_MEMORYLESS_NODES |
Michal Hocko | afb6ebb | 2017-09-06 16:20:17 -0700 | [diff] [blame] | 6392 | /* |
| 6393 | * We now know the "local memory node" for each node-- |
| 6394 | * i.e., the node of the first zone in the generic zonelist. |
| 6395 | * Set up numa_mem percpu variable for on-line cpus. During |
| 6396 | * boot, only the boot cpu should be on-line; we'll init the |
| 6397 | * secondary cpus' numa_mem as they come on-line. During |
| 6398 | * node/memory hotplug, we'll fixup all on-line cpus. |
| 6399 | */ |
Michal Hocko | d9c9a0b | 2017-09-06 16:20:20 -0700 | [diff] [blame] | 6400 | for_each_online_cpu(cpu) |
Michal Hocko | afb6ebb | 2017-09-06 16:20:17 -0700 | [diff] [blame] | 6401 | set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); |
Michal Hocko | afb6ebb | 2017-09-06 16:20:17 -0700 | [diff] [blame] | 6402 | #endif |
Michal Hocko | d9c9a0b | 2017-09-06 16:20:20 -0700 | [diff] [blame] | 6403 | } |
Michal Hocko | b93e0f3 | 2017-09-06 16:20:37 -0700 | [diff] [blame] | 6404 | |
| 6405 | spin_unlock(&lock); |
Michal Hocko | afb6ebb | 2017-09-06 16:20:17 -0700 | [diff] [blame] | 6406 | } |
| 6407 | |
| 6408 | static noinline void __init |
| 6409 | build_all_zonelists_init(void) |
| 6410 | { |
| 6411 | int cpu; |
| 6412 | |
| 6413 | __build_all_zonelists(NULL); |
| 6414 | |
Christoph Lameter | 99dcc3e | 2010-01-05 15:34:51 +0900 | [diff] [blame] | 6415 | /* |
| 6416 | * Initialize the boot_pagesets that are going to be used |
| 6417 | * for bootstrapping processors. The real pagesets for |
| 6418 | * each zone will be allocated later when the per cpu |
| 6419 | * allocator is available. |
| 6420 | * |
| 6421 | * boot_pagesets are used also for bootstrapping offline |
| 6422 | * cpus if the system is already booted because the pagesets |
| 6423 | * are needed to initialize allocators on a specific cpu too. |
| 6424 | * F.e. the percpu allocator needs the page allocator which |
| 6425 | * needs the percpu allocator in order to allocate its pagesets |
| 6426 | * (a chicken-egg dilemma). |
| 6427 | */ |
Michal Hocko | afb6ebb | 2017-09-06 16:20:17 -0700 | [diff] [blame] | 6428 | for_each_possible_cpu(cpu) |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 6429 | per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); |
Christoph Lameter | 99dcc3e | 2010-01-05 15:34:51 +0900 | [diff] [blame] | 6430 | |
Rasmus Villemoes | 061f67b | 2015-02-12 15:00:06 -0800 | [diff] [blame] | 6431 | mminit_verify_zonelist(); |
| 6432 | cpuset_init_current_mems_allowed(); |
| 6433 | } |
| 6434 | |
Haicheng Li | 4eaf3f6 | 2010-05-24 14:32:52 -0700 | [diff] [blame] | 6435 | /* |
Haicheng Li | 4eaf3f6 | 2010-05-24 14:32:52 -0700 | [diff] [blame] | 6436 | * unless system_state == SYSTEM_BOOTING. |
Rasmus Villemoes | 061f67b | 2015-02-12 15:00:06 -0800 | [diff] [blame] | 6437 | * |
Michal Hocko | 72675e1 | 2017-09-06 16:20:24 -0700 | [diff] [blame] | 6438 | * __ref due to call of __init annotated helper build_all_zonelists_init |
Rasmus Villemoes | 061f67b | 2015-02-12 15:00:06 -0800 | [diff] [blame] | 6439 | * [protected by SYSTEM_BOOTING]. |
Haicheng Li | 4eaf3f6 | 2010-05-24 14:32:52 -0700 | [diff] [blame] | 6440 | */ |
Michal Hocko | 72675e1 | 2017-09-06 16:20:24 -0700 | [diff] [blame] | 6441 | void __ref build_all_zonelists(pg_data_t *pgdat) |
Yasunori Goto | 6811378 | 2006-06-23 02:03:11 -0700 | [diff] [blame] | 6442 | { |
David Hildenbrand | 0a18e60 | 2020-08-06 23:25:27 -0700 | [diff] [blame] | 6443 | unsigned long vm_total_pages; |
| 6444 | |
Yasunori Goto | 6811378 | 2006-06-23 02:03:11 -0700 | [diff] [blame] | 6445 | if (system_state == SYSTEM_BOOTING) { |
Rasmus Villemoes | 061f67b | 2015-02-12 15:00:06 -0800 | [diff] [blame] | 6446 | build_all_zonelists_init(); |
Yasunori Goto | 6811378 | 2006-06-23 02:03:11 -0700 | [diff] [blame] | 6447 | } else { |
Michal Hocko | 11cd863 | 2017-09-06 16:20:34 -0700 | [diff] [blame] | 6448 | __build_all_zonelists(pgdat); |
Yasunori Goto | 6811378 | 2006-06-23 02:03:11 -0700 | [diff] [blame] | 6449 | /* cpuset refresh routine should be here */ |
| 6450 | } |
David Hildenbrand | 56b9413 | 2020-08-06 23:25:30 -0700 | [diff] [blame] | 6451 | /* Get the number of free pages beyond high watermark in all zones. */ |
| 6452 | vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); |
Mel Gorman | 9ef9acb | 2007-10-16 01:25:54 -0700 | [diff] [blame] | 6453 | /* |
| 6454 | * Disable grouping by mobility if the number of pages in the |
| 6455 | * system is too low to allow the mechanism to work. It would be |
| 6456 | * more accurate, but expensive to check per-zone. This check is |
| 6457 | * made on memory-hotadd so a system can start with mobility |
| 6458 | * disabled and enable it later |
| 6459 | */ |
Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 6460 | if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) |
Mel Gorman | 9ef9acb | 2007-10-16 01:25:54 -0700 | [diff] [blame] | 6461 | page_group_by_mobility_disabled = 1; |
| 6462 | else |
| 6463 | page_group_by_mobility_disabled = 0; |
| 6464 | |
Alexey Dobriyan | ce0725f | 2019-03-05 15:48:29 -0800 | [diff] [blame] | 6465 | pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", |
Joe Perches | 756a025 | 2016-03-17 14:19:47 -0700 | [diff] [blame] | 6466 | nr_online_nodes, |
Joe Perches | 756a025 | 2016-03-17 14:19:47 -0700 | [diff] [blame] | 6467 | page_group_by_mobility_disabled ? "off" : "on", |
| 6468 | vm_total_pages); |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6469 | #ifdef CONFIG_NUMA |
Anton Blanchard | f88dfff | 2014-12-10 15:42:53 -0800 | [diff] [blame] | 6470 | pr_info("Policy zone: %s\n", zone_names[policy_zone]); |
KAMEZAWA Hiroyuki | f0c0b2b | 2007-07-15 23:38:01 -0700 | [diff] [blame] | 6471 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6472 | } |
| 6473 | |
Pavel Tatashin | a9a9e77 | 2018-10-26 15:09:40 -0700 | [diff] [blame] | 6474 | /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */ |
| 6475 | static bool __meminit |
| 6476 | overlap_memmap_init(unsigned long zone, unsigned long *pfn) |
| 6477 | { |
Pavel Tatashin | a9a9e77 | 2018-10-26 15:09:40 -0700 | [diff] [blame] | 6478 | static struct memblock_region *r; |
| 6479 | |
| 6480 | if (mirrored_kernelcore && zone == ZONE_MOVABLE) { |
| 6481 | if (!r || *pfn >= memblock_region_memory_end_pfn(r)) { |
Mike Rapoport | cc6de16 | 2020-10-13 16:58:30 -0700 | [diff] [blame] | 6482 | for_each_mem_region(r) { |
Pavel Tatashin | a9a9e77 | 2018-10-26 15:09:40 -0700 | [diff] [blame] | 6483 | if (*pfn < memblock_region_memory_end_pfn(r)) |
| 6484 | break; |
| 6485 | } |
| 6486 | } |
| 6487 | if (*pfn >= memblock_region_memory_base_pfn(r) && |
| 6488 | memblock_is_mirror(r)) { |
| 6489 | *pfn = memblock_region_memory_end_pfn(r); |
| 6490 | return true; |
| 6491 | } |
| 6492 | } |
Pavel Tatashin | a9a9e77 | 2018-10-26 15:09:40 -0700 | [diff] [blame] | 6493 | return false; |
| 6494 | } |
| 6495 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6496 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6497 | * Initially all pages are reserved - free ones are freed |
Mike Rapoport | c6ffc5c | 2018-10-30 15:09:30 -0700 | [diff] [blame] | 6498 | * up by memblock_free_all() once the early boot process is |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6499 | * done. Non-atomic initialization, single-pass. |
David Hildenbrand | d882c00 | 2020-10-15 20:08:19 -0700 | [diff] [blame] | 6500 | * |
| 6501 | * All aligned pageblocks are initialized to the specified migratetype |
| 6502 | * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related |
| 6503 | * zone stats (e.g., nr_isolate_pageblock) are touched. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6504 | */ |
Baoquan He | ab28cb6 | 2021-02-24 12:06:14 -0800 | [diff] [blame] | 6505 | void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone, |
Baoquan He | dc2da7b | 2020-12-29 15:14:37 -0800 | [diff] [blame] | 6506 | unsigned long start_pfn, unsigned long zone_end_pfn, |
David Hildenbrand | d882c00 | 2020-10-15 20:08:19 -0700 | [diff] [blame] | 6507 | enum meminit_context context, |
| 6508 | struct vmem_altmap *altmap, int migratetype) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6509 | { |
Pavel Tatashin | a9a9e77 | 2018-10-26 15:09:40 -0700 | [diff] [blame] | 6510 | unsigned long pfn, end_pfn = start_pfn + size; |
Pavel Tatashin | d0dc12e | 2018-04-05 16:23:00 -0700 | [diff] [blame] | 6511 | struct page *page; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6512 | |
Hugh Dickins | 22b31ee | 2009-01-06 14:40:09 -0800 | [diff] [blame] | 6513 | if (highest_memmap_pfn < end_pfn - 1) |
| 6514 | highest_memmap_pfn = end_pfn - 1; |
| 6515 | |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6516 | #ifdef CONFIG_ZONE_DEVICE |
Dan Williams | 4b94ffd | 2016-01-15 16:56:22 -0800 | [diff] [blame] | 6517 | /* |
| 6518 | * Honor reservation requested by the driver for this ZONE_DEVICE |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6519 | * memory. We limit the total number of pages to initialize to just |
| 6520 | * those that might contain the memory mapping. We will defer the |
| 6521 | * ZONE_DEVICE page initialization until after we have released |
| 6522 | * the hotplug lock. |
Dan Williams | 4b94ffd | 2016-01-15 16:56:22 -0800 | [diff] [blame] | 6523 | */ |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6524 | if (zone == ZONE_DEVICE) { |
| 6525 | if (!altmap) |
| 6526 | return; |
| 6527 | |
| 6528 | if (start_pfn == altmap->base_pfn) |
| 6529 | start_pfn += altmap->reserve; |
| 6530 | end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); |
| 6531 | } |
| 6532 | #endif |
Dan Williams | 4b94ffd | 2016-01-15 16:56:22 -0800 | [diff] [blame] | 6533 | |
David Hildenbrand | 948c436 | 2020-02-03 17:33:59 -0800 | [diff] [blame] | 6534 | for (pfn = start_pfn; pfn < end_pfn; ) { |
Dave Hansen | a2f3aa02 | 2007-01-10 23:15:30 -0800 | [diff] [blame] | 6535 | /* |
Andrew Morton | b72d0ff | 2016-03-15 14:55:25 -0700 | [diff] [blame] | 6536 | * There can be holes in boot-time mem_map[]s handed to this |
| 6537 | * function. They do not exist on hotplugged memory. |
Dave Hansen | a2f3aa02 | 2007-01-10 23:15:30 -0800 | [diff] [blame] | 6538 | */ |
Laurent Dufour | c1d0da8 | 2020-09-25 21:19:28 -0700 | [diff] [blame] | 6539 | if (context == MEMINIT_EARLY) { |
Pavel Tatashin | a9a9e77 | 2018-10-26 15:09:40 -0700 | [diff] [blame] | 6540 | if (overlap_memmap_init(zone, &pfn)) |
| 6541 | continue; |
Baoquan He | dc2da7b | 2020-12-29 15:14:37 -0800 | [diff] [blame] | 6542 | if (defer_init(nid, pfn, zone_end_pfn)) |
Pavel Tatashin | a9a9e77 | 2018-10-26 15:09:40 -0700 | [diff] [blame] | 6543 | break; |
Dave Hansen | a2f3aa02 | 2007-01-10 23:15:30 -0800 | [diff] [blame] | 6544 | } |
Mel Gorman | ac5d253 | 2015-06-30 14:57:20 -0700 | [diff] [blame] | 6545 | |
Pavel Tatashin | d0dc12e | 2018-04-05 16:23:00 -0700 | [diff] [blame] | 6546 | page = pfn_to_page(pfn); |
| 6547 | __init_single_page(page, pfn, zone, nid); |
Laurent Dufour | c1d0da8 | 2020-09-25 21:19:28 -0700 | [diff] [blame] | 6548 | if (context == MEMINIT_HOTPLUG) |
Alexander Duyck | d483da5 | 2018-10-26 15:07:48 -0700 | [diff] [blame] | 6549 | __SetPageReserved(page); |
Pavel Tatashin | d0dc12e | 2018-04-05 16:23:00 -0700 | [diff] [blame] | 6550 | |
Mel Gorman | ac5d253 | 2015-06-30 14:57:20 -0700 | [diff] [blame] | 6551 | /* |
David Hildenbrand | d882c00 | 2020-10-15 20:08:19 -0700 | [diff] [blame] | 6552 | * Usually, we want to mark the pageblock MIGRATE_MOVABLE, |
| 6553 | * such that unmovable allocations won't be scattered all |
| 6554 | * over the place during system boot. |
Mel Gorman | ac5d253 | 2015-06-30 14:57:20 -0700 | [diff] [blame] | 6555 | */ |
David Hildenbrand | 4eb29bd | 2020-10-15 20:08:15 -0700 | [diff] [blame] | 6556 | if (IS_ALIGNED(pfn, pageblock_nr_pages)) { |
David Hildenbrand | d882c00 | 2020-10-15 20:08:19 -0700 | [diff] [blame] | 6557 | set_pageblock_migratetype(page, migratetype); |
Michal Hocko | 9b6e63c | 2017-10-03 16:16:19 -0700 | [diff] [blame] | 6558 | cond_resched(); |
Mel Gorman | ac5d253 | 2015-06-30 14:57:20 -0700 | [diff] [blame] | 6559 | } |
David Hildenbrand | 948c436 | 2020-02-03 17:33:59 -0800 | [diff] [blame] | 6560 | pfn++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6561 | } |
| 6562 | } |
| 6563 | |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6564 | #ifdef CONFIG_ZONE_DEVICE |
| 6565 | void __ref memmap_init_zone_device(struct zone *zone, |
| 6566 | unsigned long start_pfn, |
Aneesh Kumar K.V | 1f8d75c | 2020-02-03 17:34:06 -0800 | [diff] [blame] | 6567 | unsigned long nr_pages, |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6568 | struct dev_pagemap *pgmap) |
| 6569 | { |
Aneesh Kumar K.V | 1f8d75c | 2020-02-03 17:34:06 -0800 | [diff] [blame] | 6570 | unsigned long pfn, end_pfn = start_pfn + nr_pages; |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6571 | struct pglist_data *pgdat = zone->zone_pgdat; |
Christoph Hellwig | 514caf2 | 2019-06-26 14:27:13 +0200 | [diff] [blame] | 6572 | struct vmem_altmap *altmap = pgmap_altmap(pgmap); |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6573 | unsigned long zone_idx = zone_idx(zone); |
| 6574 | unsigned long start = jiffies; |
| 6575 | int nid = pgdat->node_id; |
| 6576 | |
Dan Williams | 46d945a | 2019-07-18 15:58:18 -0700 | [diff] [blame] | 6577 | if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE)) |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6578 | return; |
| 6579 | |
| 6580 | /* |
Mike Rapoport | 122e093 | 2021-06-28 19:33:26 -0700 | [diff] [blame] | 6581 | * The call to memmap_init should have already taken care |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6582 | * of the pages reserved for the memmap, so we can just jump to |
| 6583 | * the end of that region and start processing the device pages. |
| 6584 | */ |
Christoph Hellwig | 514caf2 | 2019-06-26 14:27:13 +0200 | [diff] [blame] | 6585 | if (altmap) { |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6586 | start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); |
Aneesh Kumar K.V | 1f8d75c | 2020-02-03 17:34:06 -0800 | [diff] [blame] | 6587 | nr_pages = end_pfn - start_pfn; |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6588 | } |
| 6589 | |
| 6590 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { |
| 6591 | struct page *page = pfn_to_page(pfn); |
| 6592 | |
| 6593 | __init_single_page(page, pfn, zone_idx, nid); |
| 6594 | |
| 6595 | /* |
| 6596 | * Mark page reserved as it will need to wait for onlining |
| 6597 | * phase for it to be fully associated with a zone. |
| 6598 | * |
| 6599 | * We can use the non-atomic __set_bit operation for setting |
| 6600 | * the flag as we are still initializing the pages. |
| 6601 | */ |
| 6602 | __SetPageReserved(page); |
| 6603 | |
| 6604 | /* |
Christoph Hellwig | 8a164fe | 2019-06-26 14:27:21 +0200 | [diff] [blame] | 6605 | * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer |
| 6606 | * and zone_device_data. It is a bug if a ZONE_DEVICE page is |
| 6607 | * ever freed or placed on a driver-private list. |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6608 | */ |
| 6609 | page->pgmap = pgmap; |
Christoph Hellwig | 8a164fe | 2019-06-26 14:27:21 +0200 | [diff] [blame] | 6610 | page->zone_device_data = NULL; |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6611 | |
| 6612 | /* |
| 6613 | * Mark the block movable so that blocks are reserved for |
| 6614 | * movable at startup. This will force kernel allocations |
| 6615 | * to reserve their blocks rather than leaking throughout |
| 6616 | * the address space during boot when many long-lived |
| 6617 | * kernel allocations are made. |
| 6618 | * |
Laurent Dufour | c1d0da8 | 2020-09-25 21:19:28 -0700 | [diff] [blame] | 6619 | * Please note that MEMINIT_HOTPLUG path doesn't clear memmap |
Dan Williams | ba72b4c | 2019-07-18 15:58:26 -0700 | [diff] [blame] | 6620 | * because this is done early in section_activate() |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6621 | */ |
David Hildenbrand | 4eb29bd | 2020-10-15 20:08:15 -0700 | [diff] [blame] | 6622 | if (IS_ALIGNED(pfn, pageblock_nr_pages)) { |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6623 | set_pageblock_migratetype(page, MIGRATE_MOVABLE); |
| 6624 | cond_resched(); |
| 6625 | } |
| 6626 | } |
| 6627 | |
Christoph Hellwig | fdc029b | 2019-08-18 11:05:55 +0200 | [diff] [blame] | 6628 | pr_info("%s initialised %lu pages in %ums\n", __func__, |
Aneesh Kumar K.V | 1f8d75c | 2020-02-03 17:34:06 -0800 | [diff] [blame] | 6629 | nr_pages, jiffies_to_msecs(jiffies - start)); |
Alexander Duyck | 966cf44 | 2018-10-26 15:07:52 -0700 | [diff] [blame] | 6630 | } |
| 6631 | |
| 6632 | #endif |
Andi Kleen | 1e548de | 2008-02-04 22:29:26 -0800 | [diff] [blame] | 6633 | static void __meminit zone_init_free_lists(struct zone *zone) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6634 | { |
Mel Gorman | 7aeb09f | 2014-06-04 16:10:21 -0700 | [diff] [blame] | 6635 | unsigned int order, t; |
Mel Gorman | b2a0ac8 | 2007-10-16 01:25:48 -0700 | [diff] [blame] | 6636 | for_each_migratetype_order(order, t) { |
| 6637 | INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6638 | zone->free_area[order].nr_free = 0; |
| 6639 | } |
| 6640 | } |
| 6641 | |
Mike Rapoport | 0740a50 | 2021-03-12 21:07:12 -0800 | [diff] [blame] | 6642 | /* |
| 6643 | * Only struct pages that correspond to ranges defined by memblock.memory |
| 6644 | * are zeroed and initialized by going through __init_single_page() during |
Mike Rapoport | 122e093 | 2021-06-28 19:33:26 -0700 | [diff] [blame] | 6645 | * memmap_init_zone_range(). |
Mike Rapoport | 0740a50 | 2021-03-12 21:07:12 -0800 | [diff] [blame] | 6646 | * |
| 6647 | * But, there could be struct pages that correspond to holes in |
| 6648 | * memblock.memory. This can happen because of the following reasons: |
| 6649 | * - physical memory bank size is not necessarily the exact multiple of the |
| 6650 | * arbitrary section size |
| 6651 | * - early reserved memory may not be listed in memblock.memory |
| 6652 | * - memory layouts defined with memmap= kernel parameter may not align |
| 6653 | * nicely with memmap sections |
| 6654 | * |
| 6655 | * Explicitly initialize those struct pages so that: |
| 6656 | * - PG_Reserved is set |
| 6657 | * - zone and node links point to zone and node that span the page if the |
| 6658 | * hole is in the middle of a zone |
| 6659 | * - zone and node links point to adjacent zone/node if the hole falls on |
| 6660 | * the zone boundary; the pages in such holes will be prepended to the |
| 6661 | * zone/node above the hole except for the trailing pages in the last |
| 6662 | * section that will be appended to the zone/node below. |
| 6663 | */ |
Mike Rapoport | 122e093 | 2021-06-28 19:33:26 -0700 | [diff] [blame] | 6664 | static void __init init_unavailable_range(unsigned long spfn, |
| 6665 | unsigned long epfn, |
| 6666 | int zone, int node) |
Mike Rapoport | 0740a50 | 2021-03-12 21:07:12 -0800 | [diff] [blame] | 6667 | { |
| 6668 | unsigned long pfn; |
| 6669 | u64 pgcnt = 0; |
| 6670 | |
| 6671 | for (pfn = spfn; pfn < epfn; pfn++) { |
| 6672 | if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) { |
| 6673 | pfn = ALIGN_DOWN(pfn, pageblock_nr_pages) |
| 6674 | + pageblock_nr_pages - 1; |
| 6675 | continue; |
| 6676 | } |
| 6677 | __init_single_page(pfn_to_page(pfn), pfn, zone, node); |
| 6678 | __SetPageReserved(pfn_to_page(pfn)); |
| 6679 | pgcnt++; |
| 6680 | } |
| 6681 | |
Mike Rapoport | 122e093 | 2021-06-28 19:33:26 -0700 | [diff] [blame] | 6682 | if (pgcnt) |
| 6683 | pr_info("On node %d, zone %s: %lld pages in unavailable ranges", |
| 6684 | node, zone_names[zone], pgcnt); |
Mike Rapoport | 0740a50 | 2021-03-12 21:07:12 -0800 | [diff] [blame] | 6685 | } |
Mike Rapoport | 0740a50 | 2021-03-12 21:07:12 -0800 | [diff] [blame] | 6686 | |
Mike Rapoport | 122e093 | 2021-06-28 19:33:26 -0700 | [diff] [blame] | 6687 | static void __init memmap_init_zone_range(struct zone *zone, |
| 6688 | unsigned long start_pfn, |
| 6689 | unsigned long end_pfn, |
| 6690 | unsigned long *hole_pfn) |
Pavel Tatashin | dfb3ccd | 2018-10-26 15:09:32 -0700 | [diff] [blame] | 6691 | { |
Baoquan He | 3256ff8 | 2021-02-24 12:06:17 -0800 | [diff] [blame] | 6692 | unsigned long zone_start_pfn = zone->zone_start_pfn; |
| 6693 | unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; |
Mike Rapoport | 122e093 | 2021-06-28 19:33:26 -0700 | [diff] [blame] | 6694 | int nid = zone_to_nid(zone), zone_id = zone_idx(zone); |
| 6695 | |
| 6696 | start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn); |
| 6697 | end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn); |
| 6698 | |
| 6699 | if (start_pfn >= end_pfn) |
| 6700 | return; |
| 6701 | |
| 6702 | memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn, |
| 6703 | zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); |
| 6704 | |
| 6705 | if (*hole_pfn < start_pfn) |
| 6706 | init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid); |
| 6707 | |
| 6708 | *hole_pfn = end_pfn; |
| 6709 | } |
| 6710 | |
| 6711 | static void __init memmap_init(void) |
| 6712 | { |
Baoquan He | 73a6e47 | 2020-06-03 15:57:55 -0700 | [diff] [blame] | 6713 | unsigned long start_pfn, end_pfn; |
Mike Rapoport | 122e093 | 2021-06-28 19:33:26 -0700 | [diff] [blame] | 6714 | unsigned long hole_pfn = 0; |
Nico Pache | b346075 | 2021-09-02 14:58:08 -0700 | [diff] [blame] | 6715 | int i, j, zone_id = 0, nid; |
Baoquan He | 73a6e47 | 2020-06-03 15:57:55 -0700 | [diff] [blame] | 6716 | |
Mike Rapoport | 122e093 | 2021-06-28 19:33:26 -0700 | [diff] [blame] | 6717 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { |
| 6718 | struct pglist_data *node = NODE_DATA(nid); |
Baoquan He | 73a6e47 | 2020-06-03 15:57:55 -0700 | [diff] [blame] | 6719 | |
Mike Rapoport | 122e093 | 2021-06-28 19:33:26 -0700 | [diff] [blame] | 6720 | for (j = 0; j < MAX_NR_ZONES; j++) { |
| 6721 | struct zone *zone = node->node_zones + j; |
Mike Rapoport | 0740a50 | 2021-03-12 21:07:12 -0800 | [diff] [blame] | 6722 | |
Mike Rapoport | 122e093 | 2021-06-28 19:33:26 -0700 | [diff] [blame] | 6723 | if (!populated_zone(zone)) |
| 6724 | continue; |
| 6725 | |
| 6726 | memmap_init_zone_range(zone, start_pfn, end_pfn, |
| 6727 | &hole_pfn); |
| 6728 | zone_id = j; |
| 6729 | } |
Baoquan He | 73a6e47 | 2020-06-03 15:57:55 -0700 | [diff] [blame] | 6730 | } |
Mike Rapoport | 0740a50 | 2021-03-12 21:07:12 -0800 | [diff] [blame] | 6731 | |
| 6732 | #ifdef CONFIG_SPARSEMEM |
| 6733 | /* |
Mike Rapoport | 122e093 | 2021-06-28 19:33:26 -0700 | [diff] [blame] | 6734 | * Initialize the memory map for hole in the range [memory_end, |
| 6735 | * section_end]. |
| 6736 | * Append the pages in this hole to the highest zone in the last |
| 6737 | * node. |
| 6738 | * The call to init_unavailable_range() is outside the ifdef to |
| 6739 | * silence the compiler warining about zone_id set but not used; |
| 6740 | * for FLATMEM it is a nop anyway |
Mike Rapoport | 0740a50 | 2021-03-12 21:07:12 -0800 | [diff] [blame] | 6741 | */ |
Mike Rapoport | 122e093 | 2021-06-28 19:33:26 -0700 | [diff] [blame] | 6742 | end_pfn = round_up(end_pfn, PAGES_PER_SECTION); |
Mike Rapoport | 0740a50 | 2021-03-12 21:07:12 -0800 | [diff] [blame] | 6743 | if (hole_pfn < end_pfn) |
Mike Rapoport | 0740a50 | 2021-03-12 21:07:12 -0800 | [diff] [blame] | 6744 | #endif |
Mike Rapoport | 122e093 | 2021-06-28 19:33:26 -0700 | [diff] [blame] | 6745 | init_unavailable_range(hole_pfn, end_pfn, zone_id, nid); |
Pavel Tatashin | dfb3ccd | 2018-10-26 15:09:32 -0700 | [diff] [blame] | 6746 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6747 | |
Mike Rapoport | c803b3c | 2021-09-02 14:58:02 -0700 | [diff] [blame] | 6748 | void __init *memmap_alloc(phys_addr_t size, phys_addr_t align, |
| 6749 | phys_addr_t min_addr, int nid, bool exact_nid) |
| 6750 | { |
| 6751 | void *ptr; |
| 6752 | |
| 6753 | if (exact_nid) |
| 6754 | ptr = memblock_alloc_exact_nid_raw(size, align, min_addr, |
| 6755 | MEMBLOCK_ALLOC_ACCESSIBLE, |
| 6756 | nid); |
| 6757 | else |
| 6758 | ptr = memblock_alloc_try_nid_raw(size, align, min_addr, |
| 6759 | MEMBLOCK_ALLOC_ACCESSIBLE, |
| 6760 | nid); |
| 6761 | |
| 6762 | if (ptr && size > 0) |
| 6763 | page_init_poison(ptr, size); |
| 6764 | |
| 6765 | return ptr; |
| 6766 | } |
| 6767 | |
David Rientjes | 7cd2b0a | 2014-06-23 13:22:04 -0700 | [diff] [blame] | 6768 | static int zone_batchsize(struct zone *zone) |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 6769 | { |
David Howells | 3a6be87 | 2009-05-06 16:03:03 -0700 | [diff] [blame] | 6770 | #ifdef CONFIG_MMU |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 6771 | int batch; |
| 6772 | |
| 6773 | /* |
Mel Gorman | b92ca18 | 2021-06-28 19:42:12 -0700 | [diff] [blame] | 6774 | * The number of pages to batch allocate is either ~0.1% |
| 6775 | * of the zone or 1MB, whichever is smaller. The batch |
| 6776 | * size is striking a balance between allocation latency |
| 6777 | * and zone lock contention. |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 6778 | */ |
Mel Gorman | b92ca18 | 2021-06-28 19:42:12 -0700 | [diff] [blame] | 6779 | batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE); |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 6780 | batch /= 4; /* We effectively *= 4 below */ |
| 6781 | if (batch < 1) |
| 6782 | batch = 1; |
| 6783 | |
| 6784 | /* |
Nick Piggin | 0ceaacc | 2005-12-04 13:55:25 +1100 | [diff] [blame] | 6785 | * Clamp the batch to a 2^n - 1 value. Having a power |
| 6786 | * of 2 value was found to be more likely to have |
| 6787 | * suboptimal cache aliasing properties in some cases. |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 6788 | * |
Nick Piggin | 0ceaacc | 2005-12-04 13:55:25 +1100 | [diff] [blame] | 6789 | * For example if 2 tasks are alternately allocating |
| 6790 | * batches of pages, one task can end up with a lot |
| 6791 | * of pages of one half of the possible page colors |
| 6792 | * and the other with pages of the other colors. |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 6793 | */ |
David Howells | 9155203 | 2009-05-06 16:03:02 -0700 | [diff] [blame] | 6794 | batch = rounddown_pow_of_two(batch + batch/2) - 1; |
Seth, Rohit | ba56e91 | 2005-10-29 18:15:47 -0700 | [diff] [blame] | 6795 | |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 6796 | return batch; |
David Howells | 3a6be87 | 2009-05-06 16:03:03 -0700 | [diff] [blame] | 6797 | |
| 6798 | #else |
| 6799 | /* The deferral and batching of frees should be suppressed under NOMMU |
| 6800 | * conditions. |
| 6801 | * |
| 6802 | * The problem is that NOMMU needs to be able to allocate large chunks |
| 6803 | * of contiguous memory as there's no hardware page translation to |
| 6804 | * assemble apparent contiguous memory from discontiguous pages. |
| 6805 | * |
| 6806 | * Queueing large contiguous runs of pages for batching, however, |
| 6807 | * causes the pages to actually be freed in smaller chunks. As there |
| 6808 | * can be a significant delay between the individual batches being |
| 6809 | * recycled, this leads to the once large chunks of space being |
| 6810 | * fragmented and becoming unavailable for high-order allocations. |
| 6811 | */ |
| 6812 | return 0; |
| 6813 | #endif |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 6814 | } |
| 6815 | |
Mel Gorman | 04f8cfe | 2021-06-28 19:42:15 -0700 | [diff] [blame] | 6816 | static int zone_highsize(struct zone *zone, int batch, int cpu_online) |
Mel Gorman | b92ca18 | 2021-06-28 19:42:12 -0700 | [diff] [blame] | 6817 | { |
| 6818 | #ifdef CONFIG_MMU |
| 6819 | int high; |
Mel Gorman | 203c06e | 2021-06-28 19:43:11 -0700 | [diff] [blame] | 6820 | int nr_split_cpus; |
Mel Gorman | 74f4482 | 2021-06-28 19:42:24 -0700 | [diff] [blame] | 6821 | unsigned long total_pages; |
| 6822 | |
| 6823 | if (!percpu_pagelist_high_fraction) { |
| 6824 | /* |
| 6825 | * By default, the high value of the pcp is based on the zone |
| 6826 | * low watermark so that if they are full then background |
| 6827 | * reclaim will not be started prematurely. |
| 6828 | */ |
| 6829 | total_pages = low_wmark_pages(zone); |
| 6830 | } else { |
| 6831 | /* |
| 6832 | * If percpu_pagelist_high_fraction is configured, the high |
| 6833 | * value is based on a fraction of the managed pages in the |
| 6834 | * zone. |
| 6835 | */ |
| 6836 | total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction; |
| 6837 | } |
Mel Gorman | b92ca18 | 2021-06-28 19:42:12 -0700 | [diff] [blame] | 6838 | |
| 6839 | /* |
Mel Gorman | 74f4482 | 2021-06-28 19:42:24 -0700 | [diff] [blame] | 6840 | * Split the high value across all online CPUs local to the zone. Note |
| 6841 | * that early in boot that CPUs may not be online yet and that during |
| 6842 | * CPU hotplug that the cpumask is not yet updated when a CPU is being |
Mel Gorman | 203c06e | 2021-06-28 19:43:11 -0700 | [diff] [blame] | 6843 | * onlined. For memory nodes that have no CPUs, split pcp->high across |
| 6844 | * all online CPUs to mitigate the risk that reclaim is triggered |
| 6845 | * prematurely due to pages stored on pcp lists. |
Mel Gorman | b92ca18 | 2021-06-28 19:42:12 -0700 | [diff] [blame] | 6846 | */ |
Mel Gorman | 203c06e | 2021-06-28 19:43:11 -0700 | [diff] [blame] | 6847 | nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; |
| 6848 | if (!nr_split_cpus) |
| 6849 | nr_split_cpus = num_online_cpus(); |
| 6850 | high = total_pages / nr_split_cpus; |
Mel Gorman | b92ca18 | 2021-06-28 19:42:12 -0700 | [diff] [blame] | 6851 | |
| 6852 | /* |
| 6853 | * Ensure high is at least batch*4. The multiple is based on the |
| 6854 | * historical relationship between high and batch. |
| 6855 | */ |
| 6856 | high = max(high, batch << 2); |
| 6857 | |
| 6858 | return high; |
| 6859 | #else |
| 6860 | return 0; |
| 6861 | #endif |
| 6862 | } |
| 6863 | |
Cody P Schafer | 8d7a8fa | 2013-07-03 15:01:31 -0700 | [diff] [blame] | 6864 | /* |
Vlastimil Babka | 5c3ad2e | 2020-12-14 19:10:50 -0800 | [diff] [blame] | 6865 | * pcp->high and pcp->batch values are related and generally batch is lower |
| 6866 | * than high. They are also related to pcp->count such that count is lower |
| 6867 | * than high, and as soon as it reaches high, the pcplist is flushed. |
Cody P Schafer | 8d7a8fa | 2013-07-03 15:01:31 -0700 | [diff] [blame] | 6868 | * |
Vlastimil Babka | 5c3ad2e | 2020-12-14 19:10:50 -0800 | [diff] [blame] | 6869 | * However, guaranteeing these relations at all times would require e.g. write |
| 6870 | * barriers here but also careful usage of read barriers at the read side, and |
| 6871 | * thus be prone to error and bad for performance. Thus the update only prevents |
| 6872 | * store tearing. Any new users of pcp->batch and pcp->high should ensure they |
| 6873 | * can cope with those fields changing asynchronously, and fully trust only the |
| 6874 | * pcp->count field on the local CPU with interrupts disabled. |
Cody P Schafer | 8d7a8fa | 2013-07-03 15:01:31 -0700 | [diff] [blame] | 6875 | * |
| 6876 | * mutex_is_locked(&pcp_batch_high_lock) required when calling this function |
| 6877 | * outside of boot time (or some other assurance that no concurrent updaters |
| 6878 | * exist). |
| 6879 | */ |
| 6880 | static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, |
| 6881 | unsigned long batch) |
| 6882 | { |
Vlastimil Babka | 5c3ad2e | 2020-12-14 19:10:50 -0800 | [diff] [blame] | 6883 | WRITE_ONCE(pcp->batch, batch); |
| 6884 | WRITE_ONCE(pcp->high, high); |
Cody P Schafer | 8d7a8fa | 2013-07-03 15:01:31 -0700 | [diff] [blame] | 6885 | } |
| 6886 | |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 6887 | static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) |
Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 6888 | { |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 6889 | int pindex; |
Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 6890 | |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 6891 | memset(pcp, 0, sizeof(*pcp)); |
| 6892 | memset(pzstats, 0, sizeof(*pzstats)); |
Magnus Damm | 1c6fe94 | 2005-10-26 01:58:59 -0700 | [diff] [blame] | 6893 | |
Mel Gorman | 44042b4 | 2021-06-28 19:43:08 -0700 | [diff] [blame] | 6894 | for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) |
| 6895 | INIT_LIST_HEAD(&pcp->lists[pindex]); |
Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 6896 | |
Vlastimil Babka | 69a8396 | 2020-12-14 19:10:47 -0800 | [diff] [blame] | 6897 | /* |
| 6898 | * Set batch and high values safe for a boot pageset. A true percpu |
| 6899 | * pageset's initialization will update them subsequently. Here we don't |
| 6900 | * need to be as careful as pageset_update() as nobody can access the |
| 6901 | * pageset yet. |
| 6902 | */ |
Vlastimil Babka | 952eaf8 | 2020-12-14 19:10:53 -0800 | [diff] [blame] | 6903 | pcp->high = BOOT_PAGESET_HIGH; |
| 6904 | pcp->batch = BOOT_PAGESET_BATCH; |
Mel Gorman | 3b12e7e | 2021-06-28 19:42:18 -0700 | [diff] [blame] | 6905 | pcp->free_factor = 0; |
Cody P Schafer | 88c90db | 2013-07-03 15:01:35 -0700 | [diff] [blame] | 6906 | } |
| 6907 | |
Zou Wei | 3b1f365 | 2020-12-14 19:11:12 -0800 | [diff] [blame] | 6908 | static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high, |
Vlastimil Babka | ec6e8c7e | 2020-12-14 19:10:59 -0800 | [diff] [blame] | 6909 | unsigned long batch) |
| 6910 | { |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 6911 | struct per_cpu_pages *pcp; |
Vlastimil Babka | ec6e8c7e | 2020-12-14 19:10:59 -0800 | [diff] [blame] | 6912 | int cpu; |
| 6913 | |
| 6914 | for_each_possible_cpu(cpu) { |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 6915 | pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); |
| 6916 | pageset_update(pcp, high, batch); |
Vlastimil Babka | ec6e8c7e | 2020-12-14 19:10:59 -0800 | [diff] [blame] | 6917 | } |
| 6918 | } |
| 6919 | |
Rohit Seth | 8ad4b1f | 2006-01-08 01:00:40 -0800 | [diff] [blame] | 6920 | /* |
Vlastimil Babka | 0a8b4f1 | 2020-12-14 19:10:43 -0800 | [diff] [blame] | 6921 | * Calculate and set new high and batch values for all per-cpu pagesets of a |
Mel Gorman | bbbecb3 | 2021-06-28 19:42:09 -0700 | [diff] [blame] | 6922 | * zone based on the zone's size. |
Rohit Seth | 8ad4b1f | 2006-01-08 01:00:40 -0800 | [diff] [blame] | 6923 | */ |
Mel Gorman | 04f8cfe | 2021-06-28 19:42:15 -0700 | [diff] [blame] | 6924 | static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) |
Cody P Schafer | 56cef2b | 2013-07-03 15:01:38 -0700 | [diff] [blame] | 6925 | { |
Mel Gorman | b92ca18 | 2021-06-28 19:42:12 -0700 | [diff] [blame] | 6926 | int new_high, new_batch; |
Vlastimil Babka | 7115ac6 | 2020-12-14 19:10:40 -0800 | [diff] [blame] | 6927 | |
Mel Gorman | b92ca18 | 2021-06-28 19:42:12 -0700 | [diff] [blame] | 6928 | new_batch = max(1, zone_batchsize(zone)); |
Mel Gorman | 04f8cfe | 2021-06-28 19:42:15 -0700 | [diff] [blame] | 6929 | new_high = zone_highsize(zone, new_batch, cpu_online); |
Cody P Schafer | 56cef2b | 2013-07-03 15:01:38 -0700 | [diff] [blame] | 6930 | |
Vlastimil Babka | 952eaf8 | 2020-12-14 19:10:53 -0800 | [diff] [blame] | 6931 | if (zone->pageset_high == new_high && |
| 6932 | zone->pageset_batch == new_batch) |
| 6933 | return; |
| 6934 | |
| 6935 | zone->pageset_high = new_high; |
| 6936 | zone->pageset_batch = new_batch; |
| 6937 | |
Vlastimil Babka | ec6e8c7e | 2020-12-14 19:10:59 -0800 | [diff] [blame] | 6938 | __zone_set_pageset_high_and_batch(zone, new_high, new_batch); |
Cody P Schafer | 169f6c1 | 2013-07-03 15:01:41 -0700 | [diff] [blame] | 6939 | } |
| 6940 | |
Michal Hocko | 72675e1 | 2017-09-06 16:20:24 -0700 | [diff] [blame] | 6941 | void __meminit setup_zone_pageset(struct zone *zone) |
Wu Fengguang | 319774e | 2010-05-24 14:32:49 -0700 | [diff] [blame] | 6942 | { |
| 6943 | int cpu; |
Vlastimil Babka | 0a8b4f1 | 2020-12-14 19:10:43 -0800 | [diff] [blame] | 6944 | |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 6945 | /* Size may be 0 on !SMP && !NUMA */ |
| 6946 | if (sizeof(struct per_cpu_zonestat) > 0) |
| 6947 | zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); |
| 6948 | |
| 6949 | zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); |
Vlastimil Babka | 0a8b4f1 | 2020-12-14 19:10:43 -0800 | [diff] [blame] | 6950 | for_each_possible_cpu(cpu) { |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 6951 | struct per_cpu_pages *pcp; |
| 6952 | struct per_cpu_zonestat *pzstats; |
| 6953 | |
| 6954 | pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); |
| 6955 | pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); |
| 6956 | per_cpu_pages_init(pcp, pzstats); |
Vlastimil Babka | 0a8b4f1 | 2020-12-14 19:10:43 -0800 | [diff] [blame] | 6957 | } |
| 6958 | |
Mel Gorman | 04f8cfe | 2021-06-28 19:42:15 -0700 | [diff] [blame] | 6959 | zone_set_pageset_high_and_batch(zone, 0); |
Wu Fengguang | 319774e | 2010-05-24 14:32:49 -0700 | [diff] [blame] | 6960 | } |
| 6961 | |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 6962 | /* |
Christoph Lameter | 99dcc3e | 2010-01-05 15:34:51 +0900 | [diff] [blame] | 6963 | * Allocate per cpu pagesets and initialize them. |
| 6964 | * Before this call only boot pagesets were available. |
Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 6965 | */ |
Al Viro | 78d9955 | 2005-12-15 09:18:25 +0000 | [diff] [blame] | 6966 | void __init setup_per_cpu_pageset(void) |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 6967 | { |
Mel Gorman | b4911ea | 2016-08-04 15:31:49 -0700 | [diff] [blame] | 6968 | struct pglist_data *pgdat; |
Christoph Lameter | 99dcc3e | 2010-01-05 15:34:51 +0900 | [diff] [blame] | 6969 | struct zone *zone; |
Sandipan Das | b418a0f | 2020-06-03 15:59:11 -0700 | [diff] [blame] | 6970 | int __maybe_unused cpu; |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 6971 | |
Wu Fengguang | 319774e | 2010-05-24 14:32:49 -0700 | [diff] [blame] | 6972 | for_each_populated_zone(zone) |
| 6973 | setup_zone_pageset(zone); |
Mel Gorman | b4911ea | 2016-08-04 15:31:49 -0700 | [diff] [blame] | 6974 | |
Sandipan Das | b418a0f | 2020-06-03 15:59:11 -0700 | [diff] [blame] | 6975 | #ifdef CONFIG_NUMA |
| 6976 | /* |
| 6977 | * Unpopulated zones continue using the boot pagesets. |
| 6978 | * The numa stats for these pagesets need to be reset. |
| 6979 | * Otherwise, they will end up skewing the stats of |
| 6980 | * the nodes these zones are associated with. |
| 6981 | */ |
| 6982 | for_each_possible_cpu(cpu) { |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 6983 | struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); |
Mel Gorman | f19298b | 2021-06-28 19:41:44 -0700 | [diff] [blame] | 6984 | memset(pzstats->vm_numa_event, 0, |
| 6985 | sizeof(pzstats->vm_numa_event)); |
Sandipan Das | b418a0f | 2020-06-03 15:59:11 -0700 | [diff] [blame] | 6986 | } |
| 6987 | #endif |
| 6988 | |
Mel Gorman | b4911ea | 2016-08-04 15:31:49 -0700 | [diff] [blame] | 6989 | for_each_online_pgdat(pgdat) |
| 6990 | pgdat->per_cpu_nodestats = |
| 6991 | alloc_percpu(struct per_cpu_nodestat); |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 6992 | } |
| 6993 | |
Matt Tolentino | c09b424 | 2006-01-17 07:03:44 +0100 | [diff] [blame] | 6994 | static __meminit void zone_pcp_init(struct zone *zone) |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 6995 | { |
Christoph Lameter | 99dcc3e | 2010-01-05 15:34:51 +0900 | [diff] [blame] | 6996 | /* |
| 6997 | * per cpu subsystem is not up at this point. The following code |
| 6998 | * relies on the ability of the linker to provide the |
| 6999 | * offset of a (static) per cpu variable into the per cpu area. |
| 7000 | */ |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 7001 | zone->per_cpu_pageset = &boot_pageset; |
| 7002 | zone->per_cpu_zonestats = &boot_zonestats; |
Vlastimil Babka | 952eaf8 | 2020-12-14 19:10:53 -0800 | [diff] [blame] | 7003 | zone->pageset_high = BOOT_PAGESET_HIGH; |
| 7004 | zone->pageset_batch = BOOT_PAGESET_BATCH; |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 7005 | |
Xishi Qiu | b38a872 | 2013-11-12 15:07:20 -0800 | [diff] [blame] | 7006 | if (populated_zone(zone)) |
Heiner Kallweit | 9660eca | 2021-06-28 19:41:31 -0700 | [diff] [blame] | 7007 | pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, |
| 7008 | zone->present_pages, zone_batchsize(zone)); |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 7009 | } |
| 7010 | |
Michal Hocko | dc0bbf3 | 2017-07-06 15:37:35 -0700 | [diff] [blame] | 7011 | void __meminit init_currently_empty_zone(struct zone *zone, |
Yasunori Goto | 718127c | 2006-06-23 02:03:10 -0700 | [diff] [blame] | 7012 | unsigned long zone_start_pfn, |
Yaowei Bai | b171e40 | 2015-11-05 18:47:06 -0800 | [diff] [blame] | 7013 | unsigned long size) |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 7014 | { |
| 7015 | struct pglist_data *pgdat = zone->zone_pgdat; |
Wei Yang | 8f41683 | 2018-11-30 14:09:07 -0800 | [diff] [blame] | 7016 | int zone_idx = zone_idx(zone) + 1; |
Linus Torvalds | 9dcb8b6 | 2016-10-26 10:15:30 -0700 | [diff] [blame] | 7017 | |
Wei Yang | 8f41683 | 2018-11-30 14:09:07 -0800 | [diff] [blame] | 7018 | if (zone_idx > pgdat->nr_zones) |
| 7019 | pgdat->nr_zones = zone_idx; |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 7020 | |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 7021 | zone->zone_start_pfn = zone_start_pfn; |
| 7022 | |
Mel Gorman | 708614e | 2008-07-23 21:26:51 -0700 | [diff] [blame] | 7023 | mminit_dprintk(MMINIT_TRACE, "memmap_init", |
| 7024 | "Initialising map node %d zone %lu pfns %lu -> %lu\n", |
| 7025 | pgdat->node_id, |
| 7026 | (unsigned long)zone_idx(zone), |
| 7027 | zone_start_pfn, (zone_start_pfn + size)); |
| 7028 | |
Andi Kleen | 1e548de | 2008-02-04 22:29:26 -0800 | [diff] [blame] | 7029 | zone_init_free_lists(zone); |
Linus Torvalds | 9dcb8b6 | 2016-10-26 10:15:30 -0700 | [diff] [blame] | 7030 | zone->initialized = 1; |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 7031 | } |
| 7032 | |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7033 | /** |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7034 | * get_pfn_range_for_nid - Return the start and end page frames for a node |
Randy Dunlap | 88ca3b9 | 2006-10-04 02:15:25 -0700 | [diff] [blame] | 7035 | * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. |
| 7036 | * @start_pfn: Passed by reference. On return, it will have the node start_pfn. |
| 7037 | * @end_pfn: Passed by reference. On return, it will have the node end_pfn. |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7038 | * |
| 7039 | * It returns the start and end page frame of a node based on information |
Zhang Zhen | 7d01817 | 2014-06-04 16:10:53 -0700 | [diff] [blame] | 7040 | * provided by memblock_set_node(). If called for a node |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7041 | * with no available memory, a warning is printed and the start and end |
Randy Dunlap | 88ca3b9 | 2006-10-04 02:15:25 -0700 | [diff] [blame] | 7042 | * PFNs will be 0. |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7043 | */ |
Oscar Salvador | bbe5d99 | 2018-12-28 00:37:24 -0800 | [diff] [blame] | 7044 | void __init get_pfn_range_for_nid(unsigned int nid, |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7045 | unsigned long *start_pfn, unsigned long *end_pfn) |
| 7046 | { |
Tejun Heo | c13291a | 2011-07-12 10:46:30 +0200 | [diff] [blame] | 7047 | unsigned long this_start_pfn, this_end_pfn; |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7048 | int i; |
Tejun Heo | c13291a | 2011-07-12 10:46:30 +0200 | [diff] [blame] | 7049 | |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7050 | *start_pfn = -1UL; |
| 7051 | *end_pfn = 0; |
| 7052 | |
Tejun Heo | c13291a | 2011-07-12 10:46:30 +0200 | [diff] [blame] | 7053 | for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { |
| 7054 | *start_pfn = min(*start_pfn, this_start_pfn); |
| 7055 | *end_pfn = max(*end_pfn, this_end_pfn); |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7056 | } |
| 7057 | |
Christoph Lameter | 633c066 | 2007-10-16 01:25:37 -0700 | [diff] [blame] | 7058 | if (*start_pfn == -1UL) |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7059 | *start_pfn = 0; |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7060 | } |
| 7061 | |
| 7062 | /* |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7063 | * This finds a zone that can be used for ZONE_MOVABLE pages. The |
| 7064 | * assumption is made that zones within a node are ordered in monotonic |
| 7065 | * increasing memory addresses so that the "highest" populated zone is used |
| 7066 | */ |
Adrian Bunk | b69a728 | 2008-07-23 21:28:12 -0700 | [diff] [blame] | 7067 | static void __init find_usable_zone_for_movable(void) |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7068 | { |
| 7069 | int zone_index; |
| 7070 | for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { |
| 7071 | if (zone_index == ZONE_MOVABLE) |
| 7072 | continue; |
| 7073 | |
| 7074 | if (arch_zone_highest_possible_pfn[zone_index] > |
| 7075 | arch_zone_lowest_possible_pfn[zone_index]) |
| 7076 | break; |
| 7077 | } |
| 7078 | |
| 7079 | VM_BUG_ON(zone_index == -1); |
| 7080 | movable_zone = zone_index; |
| 7081 | } |
| 7082 | |
| 7083 | /* |
| 7084 | * The zone ranges provided by the architecture do not include ZONE_MOVABLE |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 7085 | * because it is sized independent of architecture. Unlike the other zones, |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7086 | * the starting point for ZONE_MOVABLE is not fixed. It may be different |
| 7087 | * in each node depending on the size of each node and how evenly kernelcore |
| 7088 | * is distributed. This helper function adjusts the zone ranges |
| 7089 | * provided by the architecture for a given node by using the end of the |
| 7090 | * highest usable zone for ZONE_MOVABLE. This preserves the assumption that |
| 7091 | * zones within a node are in order of monotonic increases memory addresses |
| 7092 | */ |
Oscar Salvador | bbe5d99 | 2018-12-28 00:37:24 -0800 | [diff] [blame] | 7093 | static void __init adjust_zone_range_for_zone_movable(int nid, |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7094 | unsigned long zone_type, |
| 7095 | unsigned long node_start_pfn, |
| 7096 | unsigned long node_end_pfn, |
| 7097 | unsigned long *zone_start_pfn, |
| 7098 | unsigned long *zone_end_pfn) |
| 7099 | { |
| 7100 | /* Only adjust if ZONE_MOVABLE is on this node */ |
| 7101 | if (zone_movable_pfn[nid]) { |
| 7102 | /* Size ZONE_MOVABLE */ |
| 7103 | if (zone_type == ZONE_MOVABLE) { |
| 7104 | *zone_start_pfn = zone_movable_pfn[nid]; |
| 7105 | *zone_end_pfn = min(node_end_pfn, |
| 7106 | arch_zone_highest_possible_pfn[movable_zone]); |
| 7107 | |
Xishi Qiu | e506b99 | 2016-10-07 16:58:06 -0700 | [diff] [blame] | 7108 | /* Adjust for ZONE_MOVABLE starting within this range */ |
| 7109 | } else if (!mirrored_kernelcore && |
| 7110 | *zone_start_pfn < zone_movable_pfn[nid] && |
| 7111 | *zone_end_pfn > zone_movable_pfn[nid]) { |
| 7112 | *zone_end_pfn = zone_movable_pfn[nid]; |
| 7113 | |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7114 | /* Check if this whole range is within ZONE_MOVABLE */ |
| 7115 | } else if (*zone_start_pfn >= zone_movable_pfn[nid]) |
| 7116 | *zone_start_pfn = *zone_end_pfn; |
| 7117 | } |
| 7118 | } |
| 7119 | |
| 7120 | /* |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7121 | * Return the number of pages a zone spans in a node, including holes |
| 7122 | * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() |
| 7123 | */ |
Oscar Salvador | bbe5d99 | 2018-12-28 00:37:24 -0800 | [diff] [blame] | 7124 | static unsigned long __init zone_spanned_pages_in_node(int nid, |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7125 | unsigned long zone_type, |
Zhang Yanfei | 7960aed | 2013-07-08 15:59:52 -0700 | [diff] [blame] | 7126 | unsigned long node_start_pfn, |
| 7127 | unsigned long node_end_pfn, |
Taku Izumi | d91749c | 2016-03-15 14:55:18 -0700 | [diff] [blame] | 7128 | unsigned long *zone_start_pfn, |
Mike Rapoport | 854e884 | 2020-06-03 15:58:13 -0700 | [diff] [blame] | 7129 | unsigned long *zone_end_pfn) |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7130 | { |
Linxu Fang | 299c83d | 2019-05-13 17:19:17 -0700 | [diff] [blame] | 7131 | unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; |
| 7132 | unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; |
Xishi Qiu | b5685e9 | 2015-09-08 15:04:16 -0700 | [diff] [blame] | 7133 | /* When hotadd a new node from cpu_up(), the node should be empty */ |
Xishi Qiu | f9126ab | 2015-08-14 15:35:16 -0700 | [diff] [blame] | 7134 | if (!node_start_pfn && !node_end_pfn) |
| 7135 | return 0; |
| 7136 | |
Zhang Yanfei | 7960aed | 2013-07-08 15:59:52 -0700 | [diff] [blame] | 7137 | /* Get the start and end of the zone */ |
Linxu Fang | 299c83d | 2019-05-13 17:19:17 -0700 | [diff] [blame] | 7138 | *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); |
| 7139 | *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7140 | adjust_zone_range_for_zone_movable(nid, zone_type, |
| 7141 | node_start_pfn, node_end_pfn, |
Taku Izumi | d91749c | 2016-03-15 14:55:18 -0700 | [diff] [blame] | 7142 | zone_start_pfn, zone_end_pfn); |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7143 | |
| 7144 | /* Check that this node has pages within the zone's required range */ |
Taku Izumi | d91749c | 2016-03-15 14:55:18 -0700 | [diff] [blame] | 7145 | if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn) |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7146 | return 0; |
| 7147 | |
| 7148 | /* Move the zone boundaries inside the node if necessary */ |
Taku Izumi | d91749c | 2016-03-15 14:55:18 -0700 | [diff] [blame] | 7149 | *zone_end_pfn = min(*zone_end_pfn, node_end_pfn); |
| 7150 | *zone_start_pfn = max(*zone_start_pfn, node_start_pfn); |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7151 | |
| 7152 | /* Return the spanned pages */ |
Taku Izumi | d91749c | 2016-03-15 14:55:18 -0700 | [diff] [blame] | 7153 | return *zone_end_pfn - *zone_start_pfn; |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7154 | } |
| 7155 | |
| 7156 | /* |
| 7157 | * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, |
Randy Dunlap | 88ca3b9 | 2006-10-04 02:15:25 -0700 | [diff] [blame] | 7158 | * then all holes in the requested range will be accounted for. |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7159 | */ |
Oscar Salvador | bbe5d99 | 2018-12-28 00:37:24 -0800 | [diff] [blame] | 7160 | unsigned long __init __absent_pages_in_range(int nid, |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7161 | unsigned long range_start_pfn, |
| 7162 | unsigned long range_end_pfn) |
| 7163 | { |
Tejun Heo | 96e907d | 2011-07-12 10:46:29 +0200 | [diff] [blame] | 7164 | unsigned long nr_absent = range_end_pfn - range_start_pfn; |
| 7165 | unsigned long start_pfn, end_pfn; |
| 7166 | int i; |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7167 | |
Tejun Heo | 96e907d | 2011-07-12 10:46:29 +0200 | [diff] [blame] | 7168 | for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { |
| 7169 | start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); |
| 7170 | end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); |
| 7171 | nr_absent -= end_pfn - start_pfn; |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7172 | } |
Tejun Heo | 96e907d | 2011-07-12 10:46:29 +0200 | [diff] [blame] | 7173 | return nr_absent; |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7174 | } |
| 7175 | |
| 7176 | /** |
| 7177 | * absent_pages_in_range - Return number of page frames in holes within a range |
| 7178 | * @start_pfn: The start PFN to start searching for holes |
| 7179 | * @end_pfn: The end PFN to stop searching for holes |
| 7180 | * |
Mike Rapoport | a862f68 | 2019-03-05 15:48:42 -0800 | [diff] [blame] | 7181 | * Return: the number of pages frames in memory holes within a range. |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7182 | */ |
| 7183 | unsigned long __init absent_pages_in_range(unsigned long start_pfn, |
| 7184 | unsigned long end_pfn) |
| 7185 | { |
| 7186 | return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); |
| 7187 | } |
| 7188 | |
| 7189 | /* Return the number of page frames in holes in a zone on a node */ |
Oscar Salvador | bbe5d99 | 2018-12-28 00:37:24 -0800 | [diff] [blame] | 7190 | static unsigned long __init zone_absent_pages_in_node(int nid, |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7191 | unsigned long zone_type, |
Zhang Yanfei | 7960aed | 2013-07-08 15:59:52 -0700 | [diff] [blame] | 7192 | unsigned long node_start_pfn, |
Mike Rapoport | 854e884 | 2020-06-03 15:58:13 -0700 | [diff] [blame] | 7193 | unsigned long node_end_pfn) |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7194 | { |
Tejun Heo | 96e907d | 2011-07-12 10:46:29 +0200 | [diff] [blame] | 7195 | unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; |
| 7196 | unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; |
Mel Gorman | 9c7cd68 | 2006-09-27 01:49:58 -0700 | [diff] [blame] | 7197 | unsigned long zone_start_pfn, zone_end_pfn; |
Taku Izumi | 342332e | 2016-03-15 14:55:22 -0700 | [diff] [blame] | 7198 | unsigned long nr_absent; |
Mel Gorman | 9c7cd68 | 2006-09-27 01:49:58 -0700 | [diff] [blame] | 7199 | |
Xishi Qiu | b5685e9 | 2015-09-08 15:04:16 -0700 | [diff] [blame] | 7200 | /* When hotadd a new node from cpu_up(), the node should be empty */ |
Xishi Qiu | f9126ab | 2015-08-14 15:35:16 -0700 | [diff] [blame] | 7201 | if (!node_start_pfn && !node_end_pfn) |
| 7202 | return 0; |
| 7203 | |
Tejun Heo | 96e907d | 2011-07-12 10:46:29 +0200 | [diff] [blame] | 7204 | zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); |
| 7205 | zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); |
Mel Gorman | 9c7cd68 | 2006-09-27 01:49:58 -0700 | [diff] [blame] | 7206 | |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7207 | adjust_zone_range_for_zone_movable(nid, zone_type, |
| 7208 | node_start_pfn, node_end_pfn, |
| 7209 | &zone_start_pfn, &zone_end_pfn); |
Taku Izumi | 342332e | 2016-03-15 14:55:22 -0700 | [diff] [blame] | 7210 | nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); |
| 7211 | |
| 7212 | /* |
| 7213 | * ZONE_MOVABLE handling. |
| 7214 | * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages |
| 7215 | * and vice versa. |
| 7216 | */ |
Xishi Qiu | e506b99 | 2016-10-07 16:58:06 -0700 | [diff] [blame] | 7217 | if (mirrored_kernelcore && zone_movable_pfn[nid]) { |
| 7218 | unsigned long start_pfn, end_pfn; |
| 7219 | struct memblock_region *r; |
Taku Izumi | 342332e | 2016-03-15 14:55:22 -0700 | [diff] [blame] | 7220 | |
Mike Rapoport | cc6de16 | 2020-10-13 16:58:30 -0700 | [diff] [blame] | 7221 | for_each_mem_region(r) { |
Xishi Qiu | e506b99 | 2016-10-07 16:58:06 -0700 | [diff] [blame] | 7222 | start_pfn = clamp(memblock_region_memory_base_pfn(r), |
| 7223 | zone_start_pfn, zone_end_pfn); |
| 7224 | end_pfn = clamp(memblock_region_memory_end_pfn(r), |
| 7225 | zone_start_pfn, zone_end_pfn); |
Taku Izumi | 342332e | 2016-03-15 14:55:22 -0700 | [diff] [blame] | 7226 | |
Xishi Qiu | e506b99 | 2016-10-07 16:58:06 -0700 | [diff] [blame] | 7227 | if (zone_type == ZONE_MOVABLE && |
| 7228 | memblock_is_mirror(r)) |
| 7229 | nr_absent += end_pfn - start_pfn; |
Taku Izumi | 342332e | 2016-03-15 14:55:22 -0700 | [diff] [blame] | 7230 | |
Xishi Qiu | e506b99 | 2016-10-07 16:58:06 -0700 | [diff] [blame] | 7231 | if (zone_type == ZONE_NORMAL && |
| 7232 | !memblock_is_mirror(r)) |
| 7233 | nr_absent += end_pfn - start_pfn; |
Taku Izumi | 342332e | 2016-03-15 14:55:22 -0700 | [diff] [blame] | 7234 | } |
| 7235 | } |
| 7236 | |
| 7237 | return nr_absent; |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7238 | } |
Mel Gorman | 0e0b864 | 2006-09-27 01:49:56 -0700 | [diff] [blame] | 7239 | |
Oscar Salvador | bbe5d99 | 2018-12-28 00:37:24 -0800 | [diff] [blame] | 7240 | static void __init calculate_node_totalpages(struct pglist_data *pgdat, |
Zhang Yanfei | 7960aed | 2013-07-08 15:59:52 -0700 | [diff] [blame] | 7241 | unsigned long node_start_pfn, |
Mike Rapoport | 854e884 | 2020-06-03 15:58:13 -0700 | [diff] [blame] | 7242 | unsigned long node_end_pfn) |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7243 | { |
Gu Zheng | febd594 | 2015-06-24 16:57:02 -0700 | [diff] [blame] | 7244 | unsigned long realtotalpages = 0, totalpages = 0; |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7245 | enum zone_type i; |
| 7246 | |
Gu Zheng | febd594 | 2015-06-24 16:57:02 -0700 | [diff] [blame] | 7247 | for (i = 0; i < MAX_NR_ZONES; i++) { |
| 7248 | struct zone *zone = pgdat->node_zones + i; |
Taku Izumi | d91749c | 2016-03-15 14:55:18 -0700 | [diff] [blame] | 7249 | unsigned long zone_start_pfn, zone_end_pfn; |
Mike Rapoport | 3f08a30 | 2020-06-03 15:57:02 -0700 | [diff] [blame] | 7250 | unsigned long spanned, absent; |
Gu Zheng | febd594 | 2015-06-24 16:57:02 -0700 | [diff] [blame] | 7251 | unsigned long size, real_size; |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7252 | |
Mike Rapoport | 854e884 | 2020-06-03 15:58:13 -0700 | [diff] [blame] | 7253 | spanned = zone_spanned_pages_in_node(pgdat->node_id, i, |
| 7254 | node_start_pfn, |
| 7255 | node_end_pfn, |
| 7256 | &zone_start_pfn, |
| 7257 | &zone_end_pfn); |
| 7258 | absent = zone_absent_pages_in_node(pgdat->node_id, i, |
| 7259 | node_start_pfn, |
| 7260 | node_end_pfn); |
Mike Rapoport | 3f08a30 | 2020-06-03 15:57:02 -0700 | [diff] [blame] | 7261 | |
| 7262 | size = spanned; |
| 7263 | real_size = size - absent; |
| 7264 | |
Taku Izumi | d91749c | 2016-03-15 14:55:18 -0700 | [diff] [blame] | 7265 | if (size) |
| 7266 | zone->zone_start_pfn = zone_start_pfn; |
| 7267 | else |
| 7268 | zone->zone_start_pfn = 0; |
Gu Zheng | febd594 | 2015-06-24 16:57:02 -0700 | [diff] [blame] | 7269 | zone->spanned_pages = size; |
| 7270 | zone->present_pages = real_size; |
David Hildenbrand | 4b09700 | 2021-09-07 19:55:19 -0700 | [diff] [blame] | 7271 | #if defined(CONFIG_MEMORY_HOTPLUG) |
| 7272 | zone->present_early_pages = real_size; |
| 7273 | #endif |
Gu Zheng | febd594 | 2015-06-24 16:57:02 -0700 | [diff] [blame] | 7274 | |
| 7275 | totalpages += size; |
| 7276 | realtotalpages += real_size; |
| 7277 | } |
| 7278 | |
| 7279 | pgdat->node_spanned_pages = totalpages; |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7280 | pgdat->node_present_pages = realtotalpages; |
Heiner Kallweit | 9660eca | 2021-06-28 19:41:31 -0700 | [diff] [blame] | 7281 | pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7282 | } |
| 7283 | |
Mel Gorman | 835c134 | 2007-10-16 01:25:47 -0700 | [diff] [blame] | 7284 | #ifndef CONFIG_SPARSEMEM |
| 7285 | /* |
| 7286 | * Calculate the size of the zone->blockflags rounded to an unsigned long |
Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 7287 | * Start by making sure zonesize is a multiple of pageblock_order by rounding |
| 7288 | * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally |
Mel Gorman | 835c134 | 2007-10-16 01:25:47 -0700 | [diff] [blame] | 7289 | * round what is now in bits to nearest long in bits, then return it in |
| 7290 | * bytes. |
| 7291 | */ |
Linus Torvalds | 7c45512 | 2013-02-18 09:58:02 -0800 | [diff] [blame] | 7292 | static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) |
Mel Gorman | 835c134 | 2007-10-16 01:25:47 -0700 | [diff] [blame] | 7293 | { |
| 7294 | unsigned long usemapsize; |
| 7295 | |
Linus Torvalds | 7c45512 | 2013-02-18 09:58:02 -0800 | [diff] [blame] | 7296 | zonesize += zone_start_pfn & (pageblock_nr_pages-1); |
Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 7297 | usemapsize = roundup(zonesize, pageblock_nr_pages); |
| 7298 | usemapsize = usemapsize >> pageblock_order; |
Mel Gorman | 835c134 | 2007-10-16 01:25:47 -0700 | [diff] [blame] | 7299 | usemapsize *= NR_PAGEBLOCK_BITS; |
| 7300 | usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); |
| 7301 | |
| 7302 | return usemapsize / 8; |
| 7303 | } |
| 7304 | |
Baoquan He | 7010a6e | 2021-02-24 12:06:20 -0800 | [diff] [blame] | 7305 | static void __ref setup_usemap(struct zone *zone) |
Mel Gorman | 835c134 | 2007-10-16 01:25:47 -0700 | [diff] [blame] | 7306 | { |
Baoquan He | 7010a6e | 2021-02-24 12:06:20 -0800 | [diff] [blame] | 7307 | unsigned long usemapsize = usemap_size(zone->zone_start_pfn, |
| 7308 | zone->spanned_pages); |
Mel Gorman | 835c134 | 2007-10-16 01:25:47 -0700 | [diff] [blame] | 7309 | zone->pageblock_flags = NULL; |
Mike Rapoport | 23a7052 | 2019-03-05 15:46:43 -0800 | [diff] [blame] | 7310 | if (usemapsize) { |
Santosh Shilimkar | 6782832 | 2014-01-21 15:50:25 -0800 | [diff] [blame] | 7311 | zone->pageblock_flags = |
Mike Rapoport | 26fb3da | 2019-03-11 23:30:42 -0700 | [diff] [blame] | 7312 | memblock_alloc_node(usemapsize, SMP_CACHE_BYTES, |
Baoquan He | 7010a6e | 2021-02-24 12:06:20 -0800 | [diff] [blame] | 7313 | zone_to_nid(zone)); |
Mike Rapoport | 23a7052 | 2019-03-05 15:46:43 -0800 | [diff] [blame] | 7314 | if (!zone->pageblock_flags) |
| 7315 | panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", |
Baoquan He | 7010a6e | 2021-02-24 12:06:20 -0800 | [diff] [blame] | 7316 | usemapsize, zone->name, zone_to_nid(zone)); |
Mike Rapoport | 23a7052 | 2019-03-05 15:46:43 -0800 | [diff] [blame] | 7317 | } |
Mel Gorman | 835c134 | 2007-10-16 01:25:47 -0700 | [diff] [blame] | 7318 | } |
| 7319 | #else |
Baoquan He | 7010a6e | 2021-02-24 12:06:20 -0800 | [diff] [blame] | 7320 | static inline void setup_usemap(struct zone *zone) {} |
Mel Gorman | 835c134 | 2007-10-16 01:25:47 -0700 | [diff] [blame] | 7321 | #endif /* CONFIG_SPARSEMEM */ |
| 7322 | |
Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 7323 | #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE |
Mel Gorman | ba72cb8 | 2007-11-28 16:21:13 -0800 | [diff] [blame] | 7324 | |
Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 7325 | /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ |
Oscar Salvador | 03e85f9 | 2018-08-21 21:53:43 -0700 | [diff] [blame] | 7326 | void __init set_pageblock_order(void) |
Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 7327 | { |
Andrew Morton | 955c1cd | 2012-05-29 15:06:31 -0700 | [diff] [blame] | 7328 | unsigned int order; |
| 7329 | |
Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 7330 | /* Check that pageblock_nr_pages has not already been setup */ |
| 7331 | if (pageblock_order) |
| 7332 | return; |
| 7333 | |
Andrew Morton | 955c1cd | 2012-05-29 15:06:31 -0700 | [diff] [blame] | 7334 | if (HPAGE_SHIFT > PAGE_SHIFT) |
| 7335 | order = HUGETLB_PAGE_ORDER; |
| 7336 | else |
| 7337 | order = MAX_ORDER - 1; |
| 7338 | |
Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 7339 | /* |
| 7340 | * Assume the largest contiguous order of interest is a huge page. |
Andrew Morton | 955c1cd | 2012-05-29 15:06:31 -0700 | [diff] [blame] | 7341 | * This value may be variable depending on boot parameters on IA64 and |
| 7342 | * powerpc. |
Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 7343 | */ |
| 7344 | pageblock_order = order; |
| 7345 | } |
| 7346 | #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ |
| 7347 | |
Mel Gorman | ba72cb8 | 2007-11-28 16:21:13 -0800 | [diff] [blame] | 7348 | /* |
| 7349 | * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() |
Andrew Morton | 955c1cd | 2012-05-29 15:06:31 -0700 | [diff] [blame] | 7350 | * is unused as pageblock_order is set at compile-time. See |
| 7351 | * include/linux/pageblock-flags.h for the values of pageblock_order based on |
| 7352 | * the kernel config |
Mel Gorman | ba72cb8 | 2007-11-28 16:21:13 -0800 | [diff] [blame] | 7353 | */ |
Oscar Salvador | 03e85f9 | 2018-08-21 21:53:43 -0700 | [diff] [blame] | 7354 | void __init set_pageblock_order(void) |
Mel Gorman | ba72cb8 | 2007-11-28 16:21:13 -0800 | [diff] [blame] | 7355 | { |
Mel Gorman | ba72cb8 | 2007-11-28 16:21:13 -0800 | [diff] [blame] | 7356 | } |
Mel Gorman | d9c2340 | 2007-10-16 01:26:01 -0700 | [diff] [blame] | 7357 | |
| 7358 | #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ |
| 7359 | |
Oscar Salvador | 03e85f9 | 2018-08-21 21:53:43 -0700 | [diff] [blame] | 7360 | static unsigned long __init calc_memmap_size(unsigned long spanned_pages, |
Pavel Tatashin | 7cc2a95 | 2018-08-21 21:53:36 -0700 | [diff] [blame] | 7361 | unsigned long present_pages) |
Jiang Liu | 01cefae | 2012-12-12 13:52:19 -0800 | [diff] [blame] | 7362 | { |
| 7363 | unsigned long pages = spanned_pages; |
| 7364 | |
| 7365 | /* |
| 7366 | * Provide a more accurate estimation if there are holes within |
| 7367 | * the zone and SPARSEMEM is in use. If there are holes within the |
| 7368 | * zone, each populated memory region may cost us one or two extra |
| 7369 | * memmap pages due to alignment because memmap pages for each |
Masahiro Yamada | 89d790a | 2017-02-27 14:29:01 -0800 | [diff] [blame] | 7370 | * populated regions may not be naturally aligned on page boundary. |
Jiang Liu | 01cefae | 2012-12-12 13:52:19 -0800 | [diff] [blame] | 7371 | * So the (present_pages >> 4) heuristic is a tradeoff for that. |
| 7372 | */ |
| 7373 | if (spanned_pages > present_pages + (present_pages >> 4) && |
| 7374 | IS_ENABLED(CONFIG_SPARSEMEM)) |
| 7375 | pages = present_pages; |
| 7376 | |
| 7377 | return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; |
| 7378 | } |
| 7379 | |
Oscar Salvador | ace1db3 | 2018-08-21 21:53:29 -0700 | [diff] [blame] | 7380 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 7381 | static void pgdat_init_split_queue(struct pglist_data *pgdat) |
| 7382 | { |
Yang Shi | 364c1ee | 2019-09-23 15:38:06 -0700 | [diff] [blame] | 7383 | struct deferred_split *ds_queue = &pgdat->deferred_split_queue; |
| 7384 | |
| 7385 | spin_lock_init(&ds_queue->split_queue_lock); |
| 7386 | INIT_LIST_HEAD(&ds_queue->split_queue); |
| 7387 | ds_queue->split_queue_len = 0; |
Oscar Salvador | ace1db3 | 2018-08-21 21:53:29 -0700 | [diff] [blame] | 7388 | } |
| 7389 | #else |
| 7390 | static void pgdat_init_split_queue(struct pglist_data *pgdat) {} |
| 7391 | #endif |
| 7392 | |
| 7393 | #ifdef CONFIG_COMPACTION |
| 7394 | static void pgdat_init_kcompactd(struct pglist_data *pgdat) |
| 7395 | { |
| 7396 | init_waitqueue_head(&pgdat->kcompactd_wait); |
| 7397 | } |
| 7398 | #else |
| 7399 | static void pgdat_init_kcompactd(struct pglist_data *pgdat) {} |
| 7400 | #endif |
| 7401 | |
Oscar Salvador | 03e85f9 | 2018-08-21 21:53:43 -0700 | [diff] [blame] | 7402 | static void __meminit pgdat_init_internals(struct pglist_data *pgdat) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7403 | { |
Mel Gorman | 8cd7c58 | 2021-11-05 13:42:25 -0700 | [diff] [blame] | 7404 | int i; |
| 7405 | |
Dave Hansen | 208d54e | 2005-10-29 18:16:52 -0700 | [diff] [blame] | 7406 | pgdat_resize_init(pgdat); |
Oscar Salvador | ace1db3 | 2018-08-21 21:53:29 -0700 | [diff] [blame] | 7407 | |
Oscar Salvador | ace1db3 | 2018-08-21 21:53:29 -0700 | [diff] [blame] | 7408 | pgdat_init_split_queue(pgdat); |
| 7409 | pgdat_init_kcompactd(pgdat); |
| 7410 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7411 | init_waitqueue_head(&pgdat->kswapd_wait); |
Mel Gorman | 5515061 | 2012-07-31 16:44:35 -0700 | [diff] [blame] | 7412 | init_waitqueue_head(&pgdat->pfmemalloc_wait); |
Oscar Salvador | ace1db3 | 2018-08-21 21:53:29 -0700 | [diff] [blame] | 7413 | |
Mel Gorman | 8cd7c58 | 2021-11-05 13:42:25 -0700 | [diff] [blame] | 7414 | for (i = 0; i < NR_VMSCAN_THROTTLE; i++) |
| 7415 | init_waitqueue_head(&pgdat->reclaim_wait[i]); |
| 7416 | |
Joonsoo Kim | eefa864b | 2014-12-12 16:55:46 -0800 | [diff] [blame] | 7417 | pgdat_page_ext_init(pgdat); |
Johannes Weiner | 867e5e1 | 2019-11-30 17:55:34 -0800 | [diff] [blame] | 7418 | lruvec_init(&pgdat->__lruvec); |
Oscar Salvador | 03e85f9 | 2018-08-21 21:53:43 -0700 | [diff] [blame] | 7419 | } |
Michal Nazarewicz | 5f63b72 | 2012-01-11 15:16:11 +0100 | [diff] [blame] | 7420 | |
Oscar Salvador | 03e85f9 | 2018-08-21 21:53:43 -0700 | [diff] [blame] | 7421 | static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, |
| 7422 | unsigned long remaining_pages) |
| 7423 | { |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 7424 | atomic_long_set(&zone->managed_pages, remaining_pages); |
Oscar Salvador | 03e85f9 | 2018-08-21 21:53:43 -0700 | [diff] [blame] | 7425 | zone_set_nid(zone, nid); |
| 7426 | zone->name = zone_names[idx]; |
| 7427 | zone->zone_pgdat = NODE_DATA(nid); |
| 7428 | spin_lock_init(&zone->lock); |
| 7429 | zone_seqlock_init(zone); |
| 7430 | zone_pcp_init(zone); |
| 7431 | } |
| 7432 | |
| 7433 | /* |
| 7434 | * Set up the zone data structures |
| 7435 | * - init pgdat internals |
| 7436 | * - init all zones belonging to this node |
| 7437 | * |
| 7438 | * NOTE: this function is only called during memory hotplug |
| 7439 | */ |
| 7440 | #ifdef CONFIG_MEMORY_HOTPLUG |
| 7441 | void __ref free_area_init_core_hotplug(int nid) |
| 7442 | { |
| 7443 | enum zone_type z; |
| 7444 | pg_data_t *pgdat = NODE_DATA(nid); |
| 7445 | |
| 7446 | pgdat_init_internals(pgdat); |
| 7447 | for (z = 0; z < MAX_NR_ZONES; z++) |
| 7448 | zone_init_internals(&pgdat->node_zones[z], z, nid, 0); |
| 7449 | } |
| 7450 | #endif |
| 7451 | |
| 7452 | /* |
| 7453 | * Set up the zone data structures: |
| 7454 | * - mark all pages reserved |
| 7455 | * - mark all memory queues empty |
| 7456 | * - clear the memory bitmaps |
| 7457 | * |
| 7458 | * NOTE: pgdat should get zeroed by caller. |
| 7459 | * NOTE: this function is only called during early init. |
| 7460 | */ |
| 7461 | static void __init free_area_init_core(struct pglist_data *pgdat) |
| 7462 | { |
| 7463 | enum zone_type j; |
| 7464 | int nid = pgdat->node_id; |
| 7465 | |
| 7466 | pgdat_init_internals(pgdat); |
Johannes Weiner | 385386c | 2017-07-06 15:40:43 -0700 | [diff] [blame] | 7467 | pgdat->per_cpu_nodestats = &boot_nodestats; |
| 7468 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7469 | for (j = 0; j < MAX_NR_ZONES; j++) { |
| 7470 | struct zone *zone = pgdat->node_zones + j; |
Wei Yang | e694385 | 2018-06-07 17:06:04 -0700 | [diff] [blame] | 7471 | unsigned long size, freesize, memmap_pages; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7472 | |
Gu Zheng | febd594 | 2015-06-24 16:57:02 -0700 | [diff] [blame] | 7473 | size = zone->spanned_pages; |
Wei Yang | e694385 | 2018-06-07 17:06:04 -0700 | [diff] [blame] | 7474 | freesize = zone->present_pages; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7475 | |
Mel Gorman | 0e0b864 | 2006-09-27 01:49:56 -0700 | [diff] [blame] | 7476 | /* |
Jiang Liu | 9feedc9 | 2012-12-12 13:52:12 -0800 | [diff] [blame] | 7477 | * Adjust freesize so that it accounts for how much memory |
Mel Gorman | 0e0b864 | 2006-09-27 01:49:56 -0700 | [diff] [blame] | 7478 | * is used by this zone for memmap. This affects the watermark |
| 7479 | * and per-cpu initialisations |
| 7480 | */ |
Wei Yang | e694385 | 2018-06-07 17:06:04 -0700 | [diff] [blame] | 7481 | memmap_pages = calc_memmap_size(size, freesize); |
Zhong Hongbo | ba914f4 | 2014-12-12 16:56:21 -0800 | [diff] [blame] | 7482 | if (!is_highmem_idx(j)) { |
| 7483 | if (freesize >= memmap_pages) { |
| 7484 | freesize -= memmap_pages; |
| 7485 | if (memmap_pages) |
Heiner Kallweit | 9660eca | 2021-06-28 19:41:31 -0700 | [diff] [blame] | 7486 | pr_debug(" %s zone: %lu pages used for memmap\n", |
| 7487 | zone_names[j], memmap_pages); |
Zhong Hongbo | ba914f4 | 2014-12-12 16:56:21 -0800 | [diff] [blame] | 7488 | } else |
Dong Aisheng | e47aa90 | 2021-06-28 19:42:30 -0700 | [diff] [blame] | 7489 | pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n", |
Zhong Hongbo | ba914f4 | 2014-12-12 16:56:21 -0800 | [diff] [blame] | 7490 | zone_names[j], memmap_pages, freesize); |
| 7491 | } |
Mel Gorman | 0e0b864 | 2006-09-27 01:49:56 -0700 | [diff] [blame] | 7492 | |
Christoph Lameter | 6267276 | 2007-02-10 01:43:07 -0800 | [diff] [blame] | 7493 | /* Account for reserved pages */ |
Jiang Liu | 9feedc9 | 2012-12-12 13:52:12 -0800 | [diff] [blame] | 7494 | if (j == 0 && freesize > dma_reserve) { |
| 7495 | freesize -= dma_reserve; |
Heiner Kallweit | 9660eca | 2021-06-28 19:41:31 -0700 | [diff] [blame] | 7496 | pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve); |
Mel Gorman | 0e0b864 | 2006-09-27 01:49:56 -0700 | [diff] [blame] | 7497 | } |
| 7498 | |
Christoph Lameter | 98d2b0e | 2006-09-25 23:31:12 -0700 | [diff] [blame] | 7499 | if (!is_highmem_idx(j)) |
Jiang Liu | 9feedc9 | 2012-12-12 13:52:12 -0800 | [diff] [blame] | 7500 | nr_kernel_pages += freesize; |
Jiang Liu | 01cefae | 2012-12-12 13:52:19 -0800 | [diff] [blame] | 7501 | /* Charge for highmem memmap if there are enough kernel pages */ |
| 7502 | else if (nr_kernel_pages > memmap_pages * 2) |
| 7503 | nr_kernel_pages -= memmap_pages; |
Jiang Liu | 9feedc9 | 2012-12-12 13:52:12 -0800 | [diff] [blame] | 7504 | nr_all_pages += freesize; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7505 | |
Jiang Liu | 9feedc9 | 2012-12-12 13:52:12 -0800 | [diff] [blame] | 7506 | /* |
| 7507 | * Set an approximate value for lowmem here, it will be adjusted |
| 7508 | * when the bootmem allocator frees pages into the buddy system. |
| 7509 | * And all highmem pages will be managed by the buddy system. |
| 7510 | */ |
Oscar Salvador | 03e85f9 | 2018-08-21 21:53:43 -0700 | [diff] [blame] | 7511 | zone_init_internals(zone, j, nid, freesize); |
Johannes Weiner | 81c0a2b | 2013-09-11 14:20:47 -0700 | [diff] [blame] | 7512 | |
Joonsoo Kim | d883c6c | 2018-05-23 10:18:21 +0900 | [diff] [blame] | 7513 | if (!size) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7514 | continue; |
| 7515 | |
Andrew Morton | 955c1cd | 2012-05-29 15:06:31 -0700 | [diff] [blame] | 7516 | set_pageblock_order(); |
Baoquan He | 7010a6e | 2021-02-24 12:06:20 -0800 | [diff] [blame] | 7517 | setup_usemap(zone); |
Baoquan He | 9699ee7 | 2021-02-24 12:06:24 -0800 | [diff] [blame] | 7518 | init_currently_empty_zone(zone, zone->zone_start_pfn, size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7519 | } |
| 7520 | } |
| 7521 | |
Mike Rapoport | 43b02ba | 2021-06-28 19:43:05 -0700 | [diff] [blame] | 7522 | #ifdef CONFIG_FLATMEM |
Mike Rapoport | 3b446da | 2021-09-02 14:58:10 -0700 | [diff] [blame] | 7523 | static void __init alloc_node_mem_map(struct pglist_data *pgdat) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7524 | { |
Tony Luck | b0aeba7 | 2015-11-10 10:09:47 -0800 | [diff] [blame] | 7525 | unsigned long __maybe_unused start = 0; |
Laura Abbott | a1c34a3 | 2015-11-05 18:48:46 -0800 | [diff] [blame] | 7526 | unsigned long __maybe_unused offset = 0; |
| 7527 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7528 | /* Skip empty nodes */ |
| 7529 | if (!pgdat->node_spanned_pages) |
| 7530 | return; |
| 7531 | |
Tony Luck | b0aeba7 | 2015-11-10 10:09:47 -0800 | [diff] [blame] | 7532 | start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); |
| 7533 | offset = pgdat->node_start_pfn - start; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7534 | /* ia64 gets its own node_mem_map, before this, without bootmem */ |
| 7535 | if (!pgdat->node_mem_map) { |
Tony Luck | b0aeba7 | 2015-11-10 10:09:47 -0800 | [diff] [blame] | 7536 | unsigned long size, end; |
Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 7537 | struct page *map; |
| 7538 | |
Bob Picco | e984bb4 | 2006-05-20 15:00:31 -0700 | [diff] [blame] | 7539 | /* |
| 7540 | * The zone's endpoints aren't required to be MAX_ORDER |
| 7541 | * aligned but the node_mem_map endpoints must be in order |
| 7542 | * for the buddy allocator to function correctly. |
| 7543 | */ |
Cody P Schafer | 108bcc9 | 2013-02-22 16:35:23 -0800 | [diff] [blame] | 7544 | end = pgdat_end_pfn(pgdat); |
Bob Picco | e984bb4 | 2006-05-20 15:00:31 -0700 | [diff] [blame] | 7545 | end = ALIGN(end, MAX_ORDER_NR_PAGES); |
| 7546 | size = (end - start) * sizeof(struct page); |
Mike Rapoport | c803b3c | 2021-09-02 14:58:02 -0700 | [diff] [blame] | 7547 | map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT, |
| 7548 | pgdat->node_id, false); |
Mike Rapoport | 23a7052 | 2019-03-05 15:46:43 -0800 | [diff] [blame] | 7549 | if (!map) |
| 7550 | panic("Failed to allocate %ld bytes for node %d memory map\n", |
| 7551 | size, pgdat->node_id); |
Laura Abbott | a1c34a3 | 2015-11-05 18:48:46 -0800 | [diff] [blame] | 7552 | pgdat->node_mem_map = map + offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7553 | } |
Oscar Salvador | 0cd842f | 2017-11-15 17:39:18 -0800 | [diff] [blame] | 7554 | pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", |
| 7555 | __func__, pgdat->node_id, (unsigned long)pgdat, |
| 7556 | (unsigned long)pgdat->node_mem_map); |
Mike Rapoport | a9ee6cf | 2021-06-28 19:43:01 -0700 | [diff] [blame] | 7557 | #ifndef CONFIG_NUMA |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7558 | /* |
| 7559 | * With no DISCONTIG, the global mem_map is just set as node 0's |
| 7560 | */ |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7561 | if (pgdat == NODE_DATA(0)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7562 | mem_map = NODE_DATA(0)->node_mem_map; |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7563 | if (page_to_pfn(mem_map) != pgdat->node_start_pfn) |
Laura Abbott | a1c34a3 | 2015-11-05 18:48:46 -0800 | [diff] [blame] | 7564 | mem_map -= offset; |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7565 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7566 | #endif |
| 7567 | } |
Oscar Salvador | 0cd842f | 2017-11-15 17:39:18 -0800 | [diff] [blame] | 7568 | #else |
Mike Rapoport | 3b446da | 2021-09-02 14:58:10 -0700 | [diff] [blame] | 7569 | static inline void alloc_node_mem_map(struct pglist_data *pgdat) { } |
Mike Rapoport | 43b02ba | 2021-06-28 19:43:05 -0700 | [diff] [blame] | 7570 | #endif /* CONFIG_FLATMEM */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7571 | |
Oscar Salvador | 0188dc9 | 2018-08-21 21:53:39 -0700 | [diff] [blame] | 7572 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
| 7573 | static inline void pgdat_set_deferred_range(pg_data_t *pgdat) |
| 7574 | { |
Oscar Salvador | 0188dc9 | 2018-08-21 21:53:39 -0700 | [diff] [blame] | 7575 | pgdat->first_deferred_pfn = ULONG_MAX; |
| 7576 | } |
| 7577 | #else |
| 7578 | static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} |
| 7579 | #endif |
| 7580 | |
Mike Rapoport | 854e884 | 2020-06-03 15:58:13 -0700 | [diff] [blame] | 7581 | static void __init free_area_init_node(int nid) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7582 | { |
Johannes Weiner | 9109fb7 | 2008-07-23 21:27:20 -0700 | [diff] [blame] | 7583 | pg_data_t *pgdat = NODE_DATA(nid); |
Zhang Yanfei | 7960aed | 2013-07-08 15:59:52 -0700 | [diff] [blame] | 7584 | unsigned long start_pfn = 0; |
| 7585 | unsigned long end_pfn = 0; |
Johannes Weiner | 9109fb7 | 2008-07-23 21:27:20 -0700 | [diff] [blame] | 7586 | |
Minchan Kim | 88fdf75 | 2012-07-31 16:46:14 -0700 | [diff] [blame] | 7587 | /* pg_data_t should be reset to zero when it's allocated */ |
Joonsoo Kim | 97a225e | 2020-06-03 15:59:01 -0700 | [diff] [blame] | 7588 | WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); |
Minchan Kim | 88fdf75 | 2012-07-31 16:46:14 -0700 | [diff] [blame] | 7589 | |
Mike Rapoport | 854e884 | 2020-06-03 15:58:13 -0700 | [diff] [blame] | 7590 | get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7591 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7592 | pgdat->node_id = nid; |
Mike Rapoport | 854e884 | 2020-06-03 15:58:13 -0700 | [diff] [blame] | 7593 | pgdat->node_start_pfn = start_pfn; |
Mel Gorman | 75ef718 | 2016-07-28 15:45:24 -0700 | [diff] [blame] | 7594 | pgdat->per_cpu_nodestats = NULL; |
Mike Rapoport | 854e884 | 2020-06-03 15:58:13 -0700 | [diff] [blame] | 7595 | |
Juergen Gross | 8d29e18 | 2015-02-11 15:26:01 -0800 | [diff] [blame] | 7596 | pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, |
Zhen Lei | 4ada0c5 | 2015-09-08 15:04:19 -0700 | [diff] [blame] | 7597 | (u64)start_pfn << PAGE_SHIFT, |
| 7598 | end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); |
Mike Rapoport | 854e884 | 2020-06-03 15:58:13 -0700 | [diff] [blame] | 7599 | calculate_node_totalpages(pgdat, start_pfn, end_pfn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7600 | |
| 7601 | alloc_node_mem_map(pgdat); |
Oscar Salvador | 0188dc9 | 2018-08-21 21:53:39 -0700 | [diff] [blame] | 7602 | pgdat_set_deferred_range(pgdat); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7603 | |
Wei Yang | 7f3eb55 | 2015-09-08 14:59:50 -0700 | [diff] [blame] | 7604 | free_area_init_core(pgdat); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7605 | } |
| 7606 | |
Mike Rapoport | bc9331a | 2020-06-03 15:58:09 -0700 | [diff] [blame] | 7607 | void __init free_area_init_memoryless_node(int nid) |
Mike Rapoport | 3f08a30 | 2020-06-03 15:57:02 -0700 | [diff] [blame] | 7608 | { |
Mike Rapoport | 854e884 | 2020-06-03 15:58:13 -0700 | [diff] [blame] | 7609 | free_area_init_node(nid); |
Mike Rapoport | 3f08a30 | 2020-06-03 15:57:02 -0700 | [diff] [blame] | 7610 | } |
| 7611 | |
Miklos Szeredi | 418508c | 2007-05-23 13:57:55 -0700 | [diff] [blame] | 7612 | #if MAX_NUMNODES > 1 |
| 7613 | /* |
| 7614 | * Figure out the number of possible node ids. |
| 7615 | */ |
Cody P Schafer | f9872ca | 2013-04-29 15:08:01 -0700 | [diff] [blame] | 7616 | void __init setup_nr_node_ids(void) |
Miklos Szeredi | 418508c | 2007-05-23 13:57:55 -0700 | [diff] [blame] | 7617 | { |
Wei Yang | 904a955 | 2015-09-08 14:59:48 -0700 | [diff] [blame] | 7618 | unsigned int highest; |
Miklos Szeredi | 418508c | 2007-05-23 13:57:55 -0700 | [diff] [blame] | 7619 | |
Wei Yang | 904a955 | 2015-09-08 14:59:48 -0700 | [diff] [blame] | 7620 | highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); |
Miklos Szeredi | 418508c | 2007-05-23 13:57:55 -0700 | [diff] [blame] | 7621 | nr_node_ids = highest + 1; |
| 7622 | } |
Miklos Szeredi | 418508c | 2007-05-23 13:57:55 -0700 | [diff] [blame] | 7623 | #endif |
| 7624 | |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7625 | /** |
Tejun Heo | 1e01979 | 2011-07-12 09:45:34 +0200 | [diff] [blame] | 7626 | * node_map_pfn_alignment - determine the maximum internode alignment |
| 7627 | * |
| 7628 | * This function should be called after node map is populated and sorted. |
| 7629 | * It calculates the maximum power of two alignment which can distinguish |
| 7630 | * all the nodes. |
| 7631 | * |
| 7632 | * For example, if all nodes are 1GiB and aligned to 1GiB, the return value |
| 7633 | * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the |
| 7634 | * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is |
| 7635 | * shifted, 1GiB is enough and this function will indicate so. |
| 7636 | * |
| 7637 | * This is used to test whether pfn -> nid mapping of the chosen memory |
| 7638 | * model has fine enough granularity to avoid incorrect mapping for the |
| 7639 | * populated node map. |
| 7640 | * |
Mike Rapoport | a862f68 | 2019-03-05 15:48:42 -0800 | [diff] [blame] | 7641 | * Return: the determined alignment in pfn's. 0 if there is no alignment |
Tejun Heo | 1e01979 | 2011-07-12 09:45:34 +0200 | [diff] [blame] | 7642 | * requirement (single node). |
| 7643 | */ |
| 7644 | unsigned long __init node_map_pfn_alignment(void) |
| 7645 | { |
| 7646 | unsigned long accl_mask = 0, last_end = 0; |
Tejun Heo | c13291a | 2011-07-12 10:46:30 +0200 | [diff] [blame] | 7647 | unsigned long start, end, mask; |
Anshuman Khandual | 98fa15f | 2019-03-05 15:42:58 -0800 | [diff] [blame] | 7648 | int last_nid = NUMA_NO_NODE; |
Tejun Heo | c13291a | 2011-07-12 10:46:30 +0200 | [diff] [blame] | 7649 | int i, nid; |
Tejun Heo | 1e01979 | 2011-07-12 09:45:34 +0200 | [diff] [blame] | 7650 | |
Tejun Heo | c13291a | 2011-07-12 10:46:30 +0200 | [diff] [blame] | 7651 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { |
Tejun Heo | 1e01979 | 2011-07-12 09:45:34 +0200 | [diff] [blame] | 7652 | if (!start || last_nid < 0 || last_nid == nid) { |
| 7653 | last_nid = nid; |
| 7654 | last_end = end; |
| 7655 | continue; |
| 7656 | } |
| 7657 | |
| 7658 | /* |
| 7659 | * Start with a mask granular enough to pin-point to the |
| 7660 | * start pfn and tick off bits one-by-one until it becomes |
| 7661 | * too coarse to separate the current node from the last. |
| 7662 | */ |
| 7663 | mask = ~((1 << __ffs(start)) - 1); |
| 7664 | while (mask && last_end <= (start & (mask << 1))) |
| 7665 | mask <<= 1; |
| 7666 | |
| 7667 | /* accumulate all internode masks */ |
| 7668 | accl_mask |= mask; |
| 7669 | } |
| 7670 | |
| 7671 | /* convert mask to number of pages */ |
| 7672 | return ~accl_mask + 1; |
| 7673 | } |
| 7674 | |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7675 | /** |
| 7676 | * find_min_pfn_with_active_regions - Find the minimum PFN registered |
| 7677 | * |
Mike Rapoport | a862f68 | 2019-03-05 15:48:42 -0800 | [diff] [blame] | 7678 | * Return: the minimum PFN based on information provided via |
Zhang Zhen | 7d01817 | 2014-06-04 16:10:53 -0700 | [diff] [blame] | 7679 | * memblock_set_node(). |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7680 | */ |
| 7681 | unsigned long __init find_min_pfn_with_active_regions(void) |
| 7682 | { |
Mike Rapoport | 8a1b25f | 2020-06-03 15:58:18 -0700 | [diff] [blame] | 7683 | return PHYS_PFN(memblock_start_of_DRAM()); |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7684 | } |
| 7685 | |
Lee Schermerhorn | 37b07e4 | 2007-10-16 01:25:39 -0700 | [diff] [blame] | 7686 | /* |
| 7687 | * early_calculate_totalpages() |
| 7688 | * Sum pages in active regions for movable zone. |
Lai Jiangshan | 4b0ef1fe | 2012-12-12 13:51:46 -0800 | [diff] [blame] | 7689 | * Populate N_MEMORY for calculating usable_nodes. |
Lee Schermerhorn | 37b07e4 | 2007-10-16 01:25:39 -0700 | [diff] [blame] | 7690 | */ |
Adrian Bunk | 484f51f | 2007-10-16 01:26:03 -0700 | [diff] [blame] | 7691 | static unsigned long __init early_calculate_totalpages(void) |
Mel Gorman | 7e63efef | 2007-07-17 04:03:15 -0700 | [diff] [blame] | 7692 | { |
Mel Gorman | 7e63efef | 2007-07-17 04:03:15 -0700 | [diff] [blame] | 7693 | unsigned long totalpages = 0; |
Tejun Heo | c13291a | 2011-07-12 10:46:30 +0200 | [diff] [blame] | 7694 | unsigned long start_pfn, end_pfn; |
| 7695 | int i, nid; |
Mel Gorman | 7e63efef | 2007-07-17 04:03:15 -0700 | [diff] [blame] | 7696 | |
Tejun Heo | c13291a | 2011-07-12 10:46:30 +0200 | [diff] [blame] | 7697 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { |
| 7698 | unsigned long pages = end_pfn - start_pfn; |
| 7699 | |
Lee Schermerhorn | 37b07e4 | 2007-10-16 01:25:39 -0700 | [diff] [blame] | 7700 | totalpages += pages; |
| 7701 | if (pages) |
Lai Jiangshan | 4b0ef1fe | 2012-12-12 13:51:46 -0800 | [diff] [blame] | 7702 | node_set_state(nid, N_MEMORY); |
Lee Schermerhorn | 37b07e4 | 2007-10-16 01:25:39 -0700 | [diff] [blame] | 7703 | } |
Pintu Kumar | b8af294 | 2013-09-11 14:20:34 -0700 | [diff] [blame] | 7704 | return totalpages; |
Mel Gorman | 7e63efef | 2007-07-17 04:03:15 -0700 | [diff] [blame] | 7705 | } |
| 7706 | |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7707 | /* |
| 7708 | * Find the PFN the Movable zone begins in each node. Kernel memory |
| 7709 | * is spread evenly between nodes as long as the nodes have enough |
| 7710 | * memory. When they don't, some nodes will have more kernelcore than |
| 7711 | * others |
| 7712 | */ |
Kautuk Consul | b224ef8 | 2012-03-21 16:34:15 -0700 | [diff] [blame] | 7713 | static void __init find_zone_movable_pfns_for_nodes(void) |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7714 | { |
| 7715 | int i, nid; |
| 7716 | unsigned long usable_startpfn; |
| 7717 | unsigned long kernelcore_node, kernelcore_remaining; |
Yinghai Lu | 66918dc | 2009-06-30 11:41:37 -0700 | [diff] [blame] | 7718 | /* save the state before borrow the nodemask */ |
Lai Jiangshan | 4b0ef1fe | 2012-12-12 13:51:46 -0800 | [diff] [blame] | 7719 | nodemask_t saved_node_state = node_states[N_MEMORY]; |
Lee Schermerhorn | 37b07e4 | 2007-10-16 01:25:39 -0700 | [diff] [blame] | 7720 | unsigned long totalpages = early_calculate_totalpages(); |
Lai Jiangshan | 4b0ef1fe | 2012-12-12 13:51:46 -0800 | [diff] [blame] | 7721 | int usable_nodes = nodes_weight(node_states[N_MEMORY]); |
Emil Medve | 136199f | 2014-04-07 15:37:52 -0700 | [diff] [blame] | 7722 | struct memblock_region *r; |
Tang Chen | b2f3eeb | 2014-01-21 15:49:38 -0800 | [diff] [blame] | 7723 | |
| 7724 | /* Need to find movable_zone earlier when movable_node is specified. */ |
| 7725 | find_usable_zone_for_movable(); |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7726 | |
Mel Gorman | 7e63efef | 2007-07-17 04:03:15 -0700 | [diff] [blame] | 7727 | /* |
Tang Chen | b2f3eeb | 2014-01-21 15:49:38 -0800 | [diff] [blame] | 7728 | * If movable_node is specified, ignore kernelcore and movablecore |
| 7729 | * options. |
| 7730 | */ |
| 7731 | if (movable_node_is_enabled()) { |
Mike Rapoport | cc6de16 | 2020-10-13 16:58:30 -0700 | [diff] [blame] | 7732 | for_each_mem_region(r) { |
Emil Medve | 136199f | 2014-04-07 15:37:52 -0700 | [diff] [blame] | 7733 | if (!memblock_is_hotpluggable(r)) |
Tang Chen | b2f3eeb | 2014-01-21 15:49:38 -0800 | [diff] [blame] | 7734 | continue; |
| 7735 | |
Mike Rapoport | d622abf | 2020-06-03 15:56:53 -0700 | [diff] [blame] | 7736 | nid = memblock_get_region_node(r); |
Tang Chen | b2f3eeb | 2014-01-21 15:49:38 -0800 | [diff] [blame] | 7737 | |
Emil Medve | 136199f | 2014-04-07 15:37:52 -0700 | [diff] [blame] | 7738 | usable_startpfn = PFN_DOWN(r->base); |
Tang Chen | b2f3eeb | 2014-01-21 15:49:38 -0800 | [diff] [blame] | 7739 | zone_movable_pfn[nid] = zone_movable_pfn[nid] ? |
| 7740 | min(usable_startpfn, zone_movable_pfn[nid]) : |
| 7741 | usable_startpfn; |
| 7742 | } |
| 7743 | |
| 7744 | goto out2; |
| 7745 | } |
| 7746 | |
| 7747 | /* |
Taku Izumi | 342332e | 2016-03-15 14:55:22 -0700 | [diff] [blame] | 7748 | * If kernelcore=mirror is specified, ignore movablecore option |
| 7749 | */ |
| 7750 | if (mirrored_kernelcore) { |
| 7751 | bool mem_below_4gb_not_mirrored = false; |
| 7752 | |
Mike Rapoport | cc6de16 | 2020-10-13 16:58:30 -0700 | [diff] [blame] | 7753 | for_each_mem_region(r) { |
Taku Izumi | 342332e | 2016-03-15 14:55:22 -0700 | [diff] [blame] | 7754 | if (memblock_is_mirror(r)) |
| 7755 | continue; |
| 7756 | |
Mike Rapoport | d622abf | 2020-06-03 15:56:53 -0700 | [diff] [blame] | 7757 | nid = memblock_get_region_node(r); |
Taku Izumi | 342332e | 2016-03-15 14:55:22 -0700 | [diff] [blame] | 7758 | |
| 7759 | usable_startpfn = memblock_region_memory_base_pfn(r); |
| 7760 | |
| 7761 | if (usable_startpfn < 0x100000) { |
| 7762 | mem_below_4gb_not_mirrored = true; |
| 7763 | continue; |
| 7764 | } |
| 7765 | |
| 7766 | zone_movable_pfn[nid] = zone_movable_pfn[nid] ? |
| 7767 | min(usable_startpfn, zone_movable_pfn[nid]) : |
| 7768 | usable_startpfn; |
| 7769 | } |
| 7770 | |
| 7771 | if (mem_below_4gb_not_mirrored) |
Chen Tao | 633bf2f | 2020-06-03 16:00:02 -0700 | [diff] [blame] | 7772 | pr_warn("This configuration results in unmirrored kernel memory.\n"); |
Taku Izumi | 342332e | 2016-03-15 14:55:22 -0700 | [diff] [blame] | 7773 | |
| 7774 | goto out2; |
| 7775 | } |
| 7776 | |
| 7777 | /* |
David Rientjes | a5c6d65 | 2018-04-05 16:23:09 -0700 | [diff] [blame] | 7778 | * If kernelcore=nn% or movablecore=nn% was specified, calculate the |
| 7779 | * amount of necessary memory. |
| 7780 | */ |
| 7781 | if (required_kernelcore_percent) |
| 7782 | required_kernelcore = (totalpages * 100 * required_kernelcore_percent) / |
| 7783 | 10000UL; |
| 7784 | if (required_movablecore_percent) |
| 7785 | required_movablecore = (totalpages * 100 * required_movablecore_percent) / |
| 7786 | 10000UL; |
| 7787 | |
| 7788 | /* |
| 7789 | * If movablecore= was specified, calculate what size of |
Mel Gorman | 7e63efef | 2007-07-17 04:03:15 -0700 | [diff] [blame] | 7790 | * kernelcore that corresponds so that memory usable for |
| 7791 | * any allocation type is evenly spread. If both kernelcore |
| 7792 | * and movablecore are specified, then the value of kernelcore |
| 7793 | * will be used for required_kernelcore if it's greater than |
| 7794 | * what movablecore would have allowed. |
| 7795 | */ |
| 7796 | if (required_movablecore) { |
Mel Gorman | 7e63efef | 2007-07-17 04:03:15 -0700 | [diff] [blame] | 7797 | unsigned long corepages; |
| 7798 | |
| 7799 | /* |
| 7800 | * Round-up so that ZONE_MOVABLE is at least as large as what |
| 7801 | * was requested by the user |
| 7802 | */ |
| 7803 | required_movablecore = |
| 7804 | roundup(required_movablecore, MAX_ORDER_NR_PAGES); |
Xishi Qiu | 9fd745d | 2015-11-05 18:48:11 -0800 | [diff] [blame] | 7805 | required_movablecore = min(totalpages, required_movablecore); |
Mel Gorman | 7e63efef | 2007-07-17 04:03:15 -0700 | [diff] [blame] | 7806 | corepages = totalpages - required_movablecore; |
| 7807 | |
| 7808 | required_kernelcore = max(required_kernelcore, corepages); |
| 7809 | } |
| 7810 | |
Xishi Qiu | bde304b | 2015-11-05 18:48:56 -0800 | [diff] [blame] | 7811 | /* |
| 7812 | * If kernelcore was not specified or kernelcore size is larger |
| 7813 | * than totalpages, there is no ZONE_MOVABLE. |
| 7814 | */ |
| 7815 | if (!required_kernelcore || required_kernelcore >= totalpages) |
Yinghai Lu | 66918dc | 2009-06-30 11:41:37 -0700 | [diff] [blame] | 7816 | goto out; |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7817 | |
| 7818 | /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7819 | usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; |
| 7820 | |
| 7821 | restart: |
| 7822 | /* Spread kernelcore memory as evenly as possible throughout nodes */ |
| 7823 | kernelcore_node = required_kernelcore / usable_nodes; |
Lai Jiangshan | 4b0ef1fe | 2012-12-12 13:51:46 -0800 | [diff] [blame] | 7824 | for_each_node_state(nid, N_MEMORY) { |
Tejun Heo | c13291a | 2011-07-12 10:46:30 +0200 | [diff] [blame] | 7825 | unsigned long start_pfn, end_pfn; |
| 7826 | |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7827 | /* |
| 7828 | * Recalculate kernelcore_node if the division per node |
| 7829 | * now exceeds what is necessary to satisfy the requested |
| 7830 | * amount of memory for the kernel |
| 7831 | */ |
| 7832 | if (required_kernelcore < kernelcore_node) |
| 7833 | kernelcore_node = required_kernelcore / usable_nodes; |
| 7834 | |
| 7835 | /* |
| 7836 | * As the map is walked, we track how much memory is usable |
| 7837 | * by the kernel using kernelcore_remaining. When it is |
| 7838 | * 0, the rest of the node is usable by ZONE_MOVABLE |
| 7839 | */ |
| 7840 | kernelcore_remaining = kernelcore_node; |
| 7841 | |
| 7842 | /* Go through each range of PFNs within this node */ |
Tejun Heo | c13291a | 2011-07-12 10:46:30 +0200 | [diff] [blame] | 7843 | for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7844 | unsigned long size_pages; |
| 7845 | |
Tejun Heo | c13291a | 2011-07-12 10:46:30 +0200 | [diff] [blame] | 7846 | start_pfn = max(start_pfn, zone_movable_pfn[nid]); |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7847 | if (start_pfn >= end_pfn) |
| 7848 | continue; |
| 7849 | |
| 7850 | /* Account for what is only usable for kernelcore */ |
| 7851 | if (start_pfn < usable_startpfn) { |
| 7852 | unsigned long kernel_pages; |
| 7853 | kernel_pages = min(end_pfn, usable_startpfn) |
| 7854 | - start_pfn; |
| 7855 | |
| 7856 | kernelcore_remaining -= min(kernel_pages, |
| 7857 | kernelcore_remaining); |
| 7858 | required_kernelcore -= min(kernel_pages, |
| 7859 | required_kernelcore); |
| 7860 | |
| 7861 | /* Continue if range is now fully accounted */ |
| 7862 | if (end_pfn <= usable_startpfn) { |
| 7863 | |
| 7864 | /* |
| 7865 | * Push zone_movable_pfn to the end so |
| 7866 | * that if we have to rebalance |
| 7867 | * kernelcore across nodes, we will |
| 7868 | * not double account here |
| 7869 | */ |
| 7870 | zone_movable_pfn[nid] = end_pfn; |
| 7871 | continue; |
| 7872 | } |
| 7873 | start_pfn = usable_startpfn; |
| 7874 | } |
| 7875 | |
| 7876 | /* |
| 7877 | * The usable PFN range for ZONE_MOVABLE is from |
| 7878 | * start_pfn->end_pfn. Calculate size_pages as the |
| 7879 | * number of pages used as kernelcore |
| 7880 | */ |
| 7881 | size_pages = end_pfn - start_pfn; |
| 7882 | if (size_pages > kernelcore_remaining) |
| 7883 | size_pages = kernelcore_remaining; |
| 7884 | zone_movable_pfn[nid] = start_pfn + size_pages; |
| 7885 | |
| 7886 | /* |
| 7887 | * Some kernelcore has been met, update counts and |
| 7888 | * break if the kernelcore for this node has been |
Pintu Kumar | b8af294 | 2013-09-11 14:20:34 -0700 | [diff] [blame] | 7889 | * satisfied |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7890 | */ |
| 7891 | required_kernelcore -= min(required_kernelcore, |
| 7892 | size_pages); |
| 7893 | kernelcore_remaining -= size_pages; |
| 7894 | if (!kernelcore_remaining) |
| 7895 | break; |
| 7896 | } |
| 7897 | } |
| 7898 | |
| 7899 | /* |
| 7900 | * If there is still required_kernelcore, we do another pass with one |
| 7901 | * less node in the count. This will push zone_movable_pfn[nid] further |
| 7902 | * along on the nodes that still have memory until kernelcore is |
Pintu Kumar | b8af294 | 2013-09-11 14:20:34 -0700 | [diff] [blame] | 7903 | * satisfied |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7904 | */ |
| 7905 | usable_nodes--; |
| 7906 | if (usable_nodes && required_kernelcore > usable_nodes) |
| 7907 | goto restart; |
| 7908 | |
Tang Chen | b2f3eeb | 2014-01-21 15:49:38 -0800 | [diff] [blame] | 7909 | out2: |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7910 | /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ |
| 7911 | for (nid = 0; nid < MAX_NUMNODES; nid++) |
| 7912 | zone_movable_pfn[nid] = |
| 7913 | roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); |
Yinghai Lu | 66918dc | 2009-06-30 11:41:37 -0700 | [diff] [blame] | 7914 | |
Yinghai Lu | 20e6926 | 2013-03-01 14:51:27 -0800 | [diff] [blame] | 7915 | out: |
Yinghai Lu | 66918dc | 2009-06-30 11:41:37 -0700 | [diff] [blame] | 7916 | /* restore the node_state */ |
Lai Jiangshan | 4b0ef1fe | 2012-12-12 13:51:46 -0800 | [diff] [blame] | 7917 | node_states[N_MEMORY] = saved_node_state; |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7918 | } |
| 7919 | |
Lai Jiangshan | 4b0ef1fe | 2012-12-12 13:51:46 -0800 | [diff] [blame] | 7920 | /* Any regular or high memory on that node ? */ |
| 7921 | static void check_for_memory(pg_data_t *pgdat, int nid) |
Lee Schermerhorn | 37b07e4 | 2007-10-16 01:25:39 -0700 | [diff] [blame] | 7922 | { |
Lee Schermerhorn | 37b07e4 | 2007-10-16 01:25:39 -0700 | [diff] [blame] | 7923 | enum zone_type zone_type; |
| 7924 | |
Lai Jiangshan | 4b0ef1fe | 2012-12-12 13:51:46 -0800 | [diff] [blame] | 7925 | for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { |
Lee Schermerhorn | 37b07e4 | 2007-10-16 01:25:39 -0700 | [diff] [blame] | 7926 | struct zone *zone = &pgdat->node_zones[zone_type]; |
Xishi Qiu | b38a872 | 2013-11-12 15:07:20 -0800 | [diff] [blame] | 7927 | if (populated_zone(zone)) { |
Oscar Salvador | 7b0e0c0 | 2018-10-26 15:03:58 -0700 | [diff] [blame] | 7928 | if (IS_ENABLED(CONFIG_HIGHMEM)) |
| 7929 | node_set_state(nid, N_HIGH_MEMORY); |
| 7930 | if (zone_type <= ZONE_NORMAL) |
Lai Jiangshan | 4b0ef1fe | 2012-12-12 13:51:46 -0800 | [diff] [blame] | 7931 | node_set_state(nid, N_NORMAL_MEMORY); |
Bob Liu | d0048b0 | 2012-01-12 17:19:07 -0800 | [diff] [blame] | 7932 | break; |
| 7933 | } |
Lee Schermerhorn | 37b07e4 | 2007-10-16 01:25:39 -0700 | [diff] [blame] | 7934 | } |
Lee Schermerhorn | 37b07e4 | 2007-10-16 01:25:39 -0700 | [diff] [blame] | 7935 | } |
| 7936 | |
Mike Rapoport | 51930df | 2020-06-03 15:58:03 -0700 | [diff] [blame] | 7937 | /* |
Ingo Molnar | f0953a1 | 2021-05-06 18:06:47 -0700 | [diff] [blame] | 7938 | * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For |
Mike Rapoport | 51930df | 2020-06-03 15:58:03 -0700 | [diff] [blame] | 7939 | * such cases we allow max_zone_pfn sorted in the descending order |
| 7940 | */ |
| 7941 | bool __weak arch_has_descending_max_zone_pfns(void) |
| 7942 | { |
| 7943 | return false; |
| 7944 | } |
| 7945 | |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7946 | /** |
Mike Rapoport | 9691a07 | 2020-06-03 15:57:10 -0700 | [diff] [blame] | 7947 | * free_area_init - Initialise all pg_data_t and zone data |
Randy Dunlap | 88ca3b9 | 2006-10-04 02:15:25 -0700 | [diff] [blame] | 7948 | * @max_zone_pfn: an array of max PFNs for each zone |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7949 | * |
| 7950 | * This will call free_area_init_node() for each active node in the system. |
Zhang Zhen | 7d01817 | 2014-06-04 16:10:53 -0700 | [diff] [blame] | 7951 | * Using the page ranges provided by memblock_set_node(), the size of each |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7952 | * zone in each node and their holes is calculated. If the maximum PFN |
| 7953 | * between two adjacent zones match, it is assumed that the zone is empty. |
| 7954 | * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed |
| 7955 | * that arch_max_dma32_pfn has no pages. It is also assumed that a zone |
| 7956 | * starts where the previous one ended. For example, ZONE_DMA32 starts |
| 7957 | * at arch_max_dma_pfn. |
| 7958 | */ |
Mike Rapoport | 9691a07 | 2020-06-03 15:57:10 -0700 | [diff] [blame] | 7959 | void __init free_area_init(unsigned long *max_zone_pfn) |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7960 | { |
Tejun Heo | c13291a | 2011-07-12 10:46:30 +0200 | [diff] [blame] | 7961 | unsigned long start_pfn, end_pfn; |
Mike Rapoport | 51930df | 2020-06-03 15:58:03 -0700 | [diff] [blame] | 7962 | int i, nid, zone; |
| 7963 | bool descending; |
Mel Gorman | a6af2bc | 2007-02-10 01:42:57 -0800 | [diff] [blame] | 7964 | |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7965 | /* Record where the zone boundaries are */ |
| 7966 | memset(arch_zone_lowest_possible_pfn, 0, |
| 7967 | sizeof(arch_zone_lowest_possible_pfn)); |
| 7968 | memset(arch_zone_highest_possible_pfn, 0, |
| 7969 | sizeof(arch_zone_highest_possible_pfn)); |
Oliver O'Halloran | 90cae1f | 2016-07-26 15:22:17 -0700 | [diff] [blame] | 7970 | |
| 7971 | start_pfn = find_min_pfn_with_active_regions(); |
Mike Rapoport | 51930df | 2020-06-03 15:58:03 -0700 | [diff] [blame] | 7972 | descending = arch_has_descending_max_zone_pfns(); |
Oliver O'Halloran | 90cae1f | 2016-07-26 15:22:17 -0700 | [diff] [blame] | 7973 | |
| 7974 | for (i = 0; i < MAX_NR_ZONES; i++) { |
Mike Rapoport | 51930df | 2020-06-03 15:58:03 -0700 | [diff] [blame] | 7975 | if (descending) |
| 7976 | zone = MAX_NR_ZONES - i - 1; |
| 7977 | else |
| 7978 | zone = i; |
| 7979 | |
| 7980 | if (zone == ZONE_MOVABLE) |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7981 | continue; |
Oliver O'Halloran | 90cae1f | 2016-07-26 15:22:17 -0700 | [diff] [blame] | 7982 | |
Mike Rapoport | 51930df | 2020-06-03 15:58:03 -0700 | [diff] [blame] | 7983 | end_pfn = max(max_zone_pfn[zone], start_pfn); |
| 7984 | arch_zone_lowest_possible_pfn[zone] = start_pfn; |
| 7985 | arch_zone_highest_possible_pfn[zone] = end_pfn; |
Oliver O'Halloran | 90cae1f | 2016-07-26 15:22:17 -0700 | [diff] [blame] | 7986 | |
| 7987 | start_pfn = end_pfn; |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7988 | } |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7989 | |
| 7990 | /* Find the PFNs that ZONE_MOVABLE begins at in each node */ |
| 7991 | memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); |
Kautuk Consul | b224ef8 | 2012-03-21 16:34:15 -0700 | [diff] [blame] | 7992 | find_zone_movable_pfns_for_nodes(); |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7993 | |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 7994 | /* Print out the zone ranges */ |
Anton Blanchard | f88dfff | 2014-12-10 15:42:53 -0800 | [diff] [blame] | 7995 | pr_info("Zone ranges:\n"); |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 7996 | for (i = 0; i < MAX_NR_ZONES; i++) { |
| 7997 | if (i == ZONE_MOVABLE) |
| 7998 | continue; |
Anton Blanchard | f88dfff | 2014-12-10 15:42:53 -0800 | [diff] [blame] | 7999 | pr_info(" %-8s ", zone_names[i]); |
David Rientjes | 72f0ba0 | 2010-03-05 13:42:14 -0800 | [diff] [blame] | 8000 | if (arch_zone_lowest_possible_pfn[i] == |
| 8001 | arch_zone_highest_possible_pfn[i]) |
Anton Blanchard | f88dfff | 2014-12-10 15:42:53 -0800 | [diff] [blame] | 8002 | pr_cont("empty\n"); |
David Rientjes | 72f0ba0 | 2010-03-05 13:42:14 -0800 | [diff] [blame] | 8003 | else |
Juergen Gross | 8d29e18 | 2015-02-11 15:26:01 -0800 | [diff] [blame] | 8004 | pr_cont("[mem %#018Lx-%#018Lx]\n", |
| 8005 | (u64)arch_zone_lowest_possible_pfn[i] |
| 8006 | << PAGE_SHIFT, |
| 8007 | ((u64)arch_zone_highest_possible_pfn[i] |
Bjorn Helgaas | a62e2f4 | 2012-05-29 15:06:30 -0700 | [diff] [blame] | 8008 | << PAGE_SHIFT) - 1); |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 8009 | } |
| 8010 | |
| 8011 | /* Print out the PFNs ZONE_MOVABLE begins at in each node */ |
Anton Blanchard | f88dfff | 2014-12-10 15:42:53 -0800 | [diff] [blame] | 8012 | pr_info("Movable zone start for each node\n"); |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 8013 | for (i = 0; i < MAX_NUMNODES; i++) { |
| 8014 | if (zone_movable_pfn[i]) |
Juergen Gross | 8d29e18 | 2015-02-11 15:26:01 -0800 | [diff] [blame] | 8015 | pr_info(" Node %d: %#018Lx\n", i, |
| 8016 | (u64)zone_movable_pfn[i] << PAGE_SHIFT); |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 8017 | } |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 8018 | |
Dan Williams | f46edbd | 2019-07-18 15:58:04 -0700 | [diff] [blame] | 8019 | /* |
| 8020 | * Print out the early node map, and initialize the |
| 8021 | * subsection-map relative to active online memory ranges to |
| 8022 | * enable future "sub-section" extensions of the memory map. |
| 8023 | */ |
Anton Blanchard | f88dfff | 2014-12-10 15:42:53 -0800 | [diff] [blame] | 8024 | pr_info("Early memory node ranges\n"); |
Dan Williams | f46edbd | 2019-07-18 15:58:04 -0700 | [diff] [blame] | 8025 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { |
Juergen Gross | 8d29e18 | 2015-02-11 15:26:01 -0800 | [diff] [blame] | 8026 | pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, |
| 8027 | (u64)start_pfn << PAGE_SHIFT, |
| 8028 | ((u64)end_pfn << PAGE_SHIFT) - 1); |
Dan Williams | f46edbd | 2019-07-18 15:58:04 -0700 | [diff] [blame] | 8029 | subsection_map_init(start_pfn, end_pfn - start_pfn); |
| 8030 | } |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 8031 | |
| 8032 | /* Initialise every node */ |
Mel Gorman | 708614e | 2008-07-23 21:26:51 -0700 | [diff] [blame] | 8033 | mminit_verify_pageflags_layout(); |
Christoph Lameter | 8ef8286 | 2007-02-20 13:57:52 -0800 | [diff] [blame] | 8034 | setup_nr_node_ids(); |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 8035 | for_each_online_node(nid) { |
| 8036 | pg_data_t *pgdat = NODE_DATA(nid); |
Mike Rapoport | 854e884 | 2020-06-03 15:58:13 -0700 | [diff] [blame] | 8037 | free_area_init_node(nid); |
Lee Schermerhorn | 37b07e4 | 2007-10-16 01:25:39 -0700 | [diff] [blame] | 8038 | |
| 8039 | /* Any memory on that node */ |
| 8040 | if (pgdat->node_present_pages) |
Lai Jiangshan | 4b0ef1fe | 2012-12-12 13:51:46 -0800 | [diff] [blame] | 8041 | node_set_state(nid, N_MEMORY); |
| 8042 | check_for_memory(pgdat, nid); |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 8043 | } |
Mike Rapoport | 122e093 | 2021-06-28 19:33:26 -0700 | [diff] [blame] | 8044 | |
| 8045 | memmap_init(); |
Mel Gorman | c713216 | 2006-09-27 01:49:43 -0700 | [diff] [blame] | 8046 | } |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 8047 | |
David Rientjes | a5c6d65 | 2018-04-05 16:23:09 -0700 | [diff] [blame] | 8048 | static int __init cmdline_parse_core(char *p, unsigned long *core, |
| 8049 | unsigned long *percent) |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 8050 | { |
| 8051 | unsigned long long coremem; |
David Rientjes | a5c6d65 | 2018-04-05 16:23:09 -0700 | [diff] [blame] | 8052 | char *endptr; |
| 8053 | |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 8054 | if (!p) |
| 8055 | return -EINVAL; |
| 8056 | |
David Rientjes | a5c6d65 | 2018-04-05 16:23:09 -0700 | [diff] [blame] | 8057 | /* Value may be a percentage of total memory, otherwise bytes */ |
| 8058 | coremem = simple_strtoull(p, &endptr, 0); |
| 8059 | if (*endptr == '%') { |
| 8060 | /* Paranoid check for percent values greater than 100 */ |
| 8061 | WARN_ON(coremem > 100); |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 8062 | |
David Rientjes | a5c6d65 | 2018-04-05 16:23:09 -0700 | [diff] [blame] | 8063 | *percent = coremem; |
| 8064 | } else { |
| 8065 | coremem = memparse(p, &p); |
| 8066 | /* Paranoid check that UL is enough for the coremem value */ |
| 8067 | WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 8068 | |
David Rientjes | a5c6d65 | 2018-04-05 16:23:09 -0700 | [diff] [blame] | 8069 | *core = coremem >> PAGE_SHIFT; |
| 8070 | *percent = 0UL; |
| 8071 | } |
Mel Gorman | 2a1e274 | 2007-07-17 04:03:12 -0700 | [diff] [blame] | 8072 | return 0; |
| 8073 | } |
Mel Gorman | ed7ed36 | 2007-07-17 04:03:14 -0700 | [diff] [blame] | 8074 | |
Mel Gorman | 7e63efef | 2007-07-17 04:03:15 -0700 | [diff] [blame] | 8075 | /* |
| 8076 | * kernelcore=size sets the amount of memory for use for allocations that |
| 8077 | * cannot be reclaimed or migrated. |
| 8078 | */ |
| 8079 | static int __init cmdline_parse_kernelcore(char *p) |
| 8080 | { |
Taku Izumi | 342332e | 2016-03-15 14:55:22 -0700 | [diff] [blame] | 8081 | /* parse kernelcore=mirror */ |
| 8082 | if (parse_option_str(p, "mirror")) { |
| 8083 | mirrored_kernelcore = true; |
| 8084 | return 0; |
| 8085 | } |
| 8086 | |
David Rientjes | a5c6d65 | 2018-04-05 16:23:09 -0700 | [diff] [blame] | 8087 | return cmdline_parse_core(p, &required_kernelcore, |
| 8088 | &required_kernelcore_percent); |
Mel Gorman | 7e63efef | 2007-07-17 04:03:15 -0700 | [diff] [blame] | 8089 | } |
| 8090 | |
| 8091 | /* |
| 8092 | * movablecore=size sets the amount of memory for use for allocations that |
| 8093 | * can be reclaimed or migrated. |
| 8094 | */ |
| 8095 | static int __init cmdline_parse_movablecore(char *p) |
| 8096 | { |
David Rientjes | a5c6d65 | 2018-04-05 16:23:09 -0700 | [diff] [blame] | 8097 | return cmdline_parse_core(p, &required_movablecore, |
| 8098 | &required_movablecore_percent); |
Mel Gorman | 7e63efef | 2007-07-17 04:03:15 -0700 | [diff] [blame] | 8099 | } |
| 8100 | |
Mel Gorman | ed7ed36 | 2007-07-17 04:03:14 -0700 | [diff] [blame] | 8101 | early_param("kernelcore", cmdline_parse_kernelcore); |
Mel Gorman | 7e63efef | 2007-07-17 04:03:15 -0700 | [diff] [blame] | 8102 | early_param("movablecore", cmdline_parse_movablecore); |
Mel Gorman | ed7ed36 | 2007-07-17 04:03:14 -0700 | [diff] [blame] | 8103 | |
Jiang Liu | c3d5f5f | 2013-07-03 15:03:14 -0700 | [diff] [blame] | 8104 | void adjust_managed_page_count(struct page *page, long count) |
| 8105 | { |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 8106 | atomic_long_add(count, &page_zone(page)->managed_pages); |
Arun KS | ca79b0c | 2018-12-28 00:34:29 -0800 | [diff] [blame] | 8107 | totalram_pages_add(count); |
Jiang Liu | 3dcc057 | 2013-07-03 15:03:21 -0700 | [diff] [blame] | 8108 | #ifdef CONFIG_HIGHMEM |
| 8109 | if (PageHighMem(page)) |
Arun KS | ca79b0c | 2018-12-28 00:34:29 -0800 | [diff] [blame] | 8110 | totalhigh_pages_add(count); |
Jiang Liu | 3dcc057 | 2013-07-03 15:03:21 -0700 | [diff] [blame] | 8111 | #endif |
Jiang Liu | c3d5f5f | 2013-07-03 15:03:14 -0700 | [diff] [blame] | 8112 | } |
Jiang Liu | 3dcc057 | 2013-07-03 15:03:21 -0700 | [diff] [blame] | 8113 | EXPORT_SYMBOL(adjust_managed_page_count); |
Jiang Liu | c3d5f5f | 2013-07-03 15:03:14 -0700 | [diff] [blame] | 8114 | |
Alexey Dobriyan | e5cb113 | 2018-12-28 00:36:03 -0800 | [diff] [blame] | 8115 | unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) |
Jiang Liu | 69afade | 2013-04-29 15:06:21 -0700 | [diff] [blame] | 8116 | { |
Jiang Liu | 1119969 | 2013-07-03 15:02:48 -0700 | [diff] [blame] | 8117 | void *pos; |
| 8118 | unsigned long pages = 0; |
Jiang Liu | 69afade | 2013-04-29 15:06:21 -0700 | [diff] [blame] | 8119 | |
Jiang Liu | 1119969 | 2013-07-03 15:02:48 -0700 | [diff] [blame] | 8120 | start = (void *)PAGE_ALIGN((unsigned long)start); |
| 8121 | end = (void *)((unsigned long)end & PAGE_MASK); |
| 8122 | for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { |
Dave Hansen | 0d83432 | 2018-08-02 15:58:26 -0700 | [diff] [blame] | 8123 | struct page *page = virt_to_page(pos); |
| 8124 | void *direct_map_addr; |
| 8125 | |
| 8126 | /* |
| 8127 | * 'direct_map_addr' might be different from 'pos' |
| 8128 | * because some architectures' virt_to_page() |
| 8129 | * work with aliases. Getting the direct map |
| 8130 | * address ensures that we get a _writeable_ |
| 8131 | * alias for the memset(). |
| 8132 | */ |
| 8133 | direct_map_addr = page_address(page); |
Vincenzo Frascino | c746170 | 2020-12-22 12:01:49 -0800 | [diff] [blame] | 8134 | /* |
| 8135 | * Perform a kasan-unchecked memset() since this memory |
| 8136 | * has not been initialized. |
| 8137 | */ |
| 8138 | direct_map_addr = kasan_reset_tag(direct_map_addr); |
Jiang Liu | dbe67df | 2013-07-03 15:02:51 -0700 | [diff] [blame] | 8139 | if ((unsigned int)poison <= 0xFF) |
Dave Hansen | 0d83432 | 2018-08-02 15:58:26 -0700 | [diff] [blame] | 8140 | memset(direct_map_addr, poison, PAGE_SIZE); |
| 8141 | |
| 8142 | free_reserved_page(page); |
Jiang Liu | 69afade | 2013-04-29 15:06:21 -0700 | [diff] [blame] | 8143 | } |
| 8144 | |
| 8145 | if (pages && s) |
Miaohe Lin | ff7ed9e | 2021-11-05 13:40:05 -0700 | [diff] [blame] | 8146 | pr_info("Freeing %s memory: %ldK\n", s, K(pages)); |
Jiang Liu | 69afade | 2013-04-29 15:06:21 -0700 | [diff] [blame] | 8147 | |
| 8148 | return pages; |
| 8149 | } |
| 8150 | |
Kefeng Wang | 1f9d03c | 2021-04-29 23:00:55 -0700 | [diff] [blame] | 8151 | void __init mem_init_print_info(void) |
Jiang Liu | 7ee3d4e | 2013-07-03 15:03:41 -0700 | [diff] [blame] | 8152 | { |
| 8153 | unsigned long physpages, codesize, datasize, rosize, bss_size; |
| 8154 | unsigned long init_code_size, init_data_size; |
| 8155 | |
| 8156 | physpages = get_num_physpages(); |
| 8157 | codesize = _etext - _stext; |
| 8158 | datasize = _edata - _sdata; |
| 8159 | rosize = __end_rodata - __start_rodata; |
| 8160 | bss_size = __bss_stop - __bss_start; |
| 8161 | init_data_size = __init_end - __init_begin; |
| 8162 | init_code_size = _einittext - _sinittext; |
| 8163 | |
| 8164 | /* |
| 8165 | * Detect special cases and adjust section sizes accordingly: |
| 8166 | * 1) .init.* may be embedded into .data sections |
| 8167 | * 2) .init.text.* may be out of [__init_begin, __init_end], |
| 8168 | * please refer to arch/tile/kernel/vmlinux.lds.S. |
| 8169 | * 3) .rodata.* may be embedded into .text or .data sections. |
| 8170 | */ |
| 8171 | #define adj_init_size(start, end, size, pos, adj) \ |
Pintu Kumar | b8af294 | 2013-09-11 14:20:34 -0700 | [diff] [blame] | 8172 | do { \ |
| 8173 | if (start <= pos && pos < end && size > adj) \ |
| 8174 | size -= adj; \ |
| 8175 | } while (0) |
Jiang Liu | 7ee3d4e | 2013-07-03 15:03:41 -0700 | [diff] [blame] | 8176 | |
| 8177 | adj_init_size(__init_begin, __init_end, init_data_size, |
| 8178 | _sinittext, init_code_size); |
| 8179 | adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); |
| 8180 | adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); |
| 8181 | adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); |
| 8182 | adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); |
| 8183 | |
| 8184 | #undef adj_init_size |
| 8185 | |
Joe Perches | 756a025 | 2016-03-17 14:19:47 -0700 | [diff] [blame] | 8186 | pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" |
Jiang Liu | 7ee3d4e | 2013-07-03 15:03:41 -0700 | [diff] [blame] | 8187 | #ifdef CONFIG_HIGHMEM |
Joe Perches | 756a025 | 2016-03-17 14:19:47 -0700 | [diff] [blame] | 8188 | ", %luK highmem" |
Jiang Liu | 7ee3d4e | 2013-07-03 15:03:41 -0700 | [diff] [blame] | 8189 | #endif |
Kefeng Wang | 1f9d03c | 2021-04-29 23:00:55 -0700 | [diff] [blame] | 8190 | ")\n", |
Miaohe Lin | ff7ed9e | 2021-11-05 13:40:05 -0700 | [diff] [blame] | 8191 | K(nr_free_pages()), K(physpages), |
Joe Perches | 756a025 | 2016-03-17 14:19:47 -0700 | [diff] [blame] | 8192 | codesize >> 10, datasize >> 10, rosize >> 10, |
| 8193 | (init_data_size + init_code_size) >> 10, bss_size >> 10, |
Miaohe Lin | ff7ed9e | 2021-11-05 13:40:05 -0700 | [diff] [blame] | 8194 | K(physpages - totalram_pages() - totalcma_pages), |
| 8195 | K(totalcma_pages) |
Jiang Liu | 7ee3d4e | 2013-07-03 15:03:41 -0700 | [diff] [blame] | 8196 | #ifdef CONFIG_HIGHMEM |
Miaohe Lin | ff7ed9e | 2021-11-05 13:40:05 -0700 | [diff] [blame] | 8197 | , K(totalhigh_pages()) |
Jiang Liu | 7ee3d4e | 2013-07-03 15:03:41 -0700 | [diff] [blame] | 8198 | #endif |
Kefeng Wang | 1f9d03c | 2021-04-29 23:00:55 -0700 | [diff] [blame] | 8199 | ); |
Jiang Liu | 7ee3d4e | 2013-07-03 15:03:41 -0700 | [diff] [blame] | 8200 | } |
| 8201 | |
Mel Gorman | 0e0b864 | 2006-09-27 01:49:56 -0700 | [diff] [blame] | 8202 | /** |
Randy Dunlap | 88ca3b9 | 2006-10-04 02:15:25 -0700 | [diff] [blame] | 8203 | * set_dma_reserve - set the specified number of pages reserved in the first zone |
| 8204 | * @new_dma_reserve: The number of pages to mark reserved |
Mel Gorman | 0e0b864 | 2006-09-27 01:49:56 -0700 | [diff] [blame] | 8205 | * |
Yaowei Bai | 013110a | 2015-09-08 15:04:10 -0700 | [diff] [blame] | 8206 | * The per-cpu batchsize and zone watermarks are determined by managed_pages. |
Mel Gorman | 0e0b864 | 2006-09-27 01:49:56 -0700 | [diff] [blame] | 8207 | * In the DMA zone, a significant percentage may be consumed by kernel image |
| 8208 | * and other unfreeable allocations which can skew the watermarks badly. This |
Randy Dunlap | 88ca3b9 | 2006-10-04 02:15:25 -0700 | [diff] [blame] | 8209 | * function may optionally be used to account for unfreeable pages in the |
| 8210 | * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and |
| 8211 | * smaller per-cpu batchsize. |
Mel Gorman | 0e0b864 | 2006-09-27 01:49:56 -0700 | [diff] [blame] | 8212 | */ |
| 8213 | void __init set_dma_reserve(unsigned long new_dma_reserve) |
| 8214 | { |
| 8215 | dma_reserve = new_dma_reserve; |
| 8216 | } |
| 8217 | |
Sebastian Andrzej Siewior | 005fd4b | 2016-11-03 15:50:02 +0100 | [diff] [blame] | 8218 | static int page_alloc_cpu_dead(unsigned int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8219 | { |
Mel Gorman | 04f8cfe | 2021-06-28 19:42:15 -0700 | [diff] [blame] | 8220 | struct zone *zone; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8221 | |
Sebastian Andrzej Siewior | 005fd4b | 2016-11-03 15:50:02 +0100 | [diff] [blame] | 8222 | lru_add_drain_cpu(cpu); |
| 8223 | drain_pages(cpu); |
Christoph Lameter | 9f8f217 | 2008-02-04 22:29:11 -0800 | [diff] [blame] | 8224 | |
Sebastian Andrzej Siewior | 005fd4b | 2016-11-03 15:50:02 +0100 | [diff] [blame] | 8225 | /* |
| 8226 | * Spill the event counters of the dead processor |
| 8227 | * into the current processors event counters. |
| 8228 | * This artificially elevates the count of the current |
| 8229 | * processor. |
| 8230 | */ |
| 8231 | vm_events_fold_cpu(cpu); |
Christoph Lameter | 9f8f217 | 2008-02-04 22:29:11 -0800 | [diff] [blame] | 8232 | |
Sebastian Andrzej Siewior | 005fd4b | 2016-11-03 15:50:02 +0100 | [diff] [blame] | 8233 | /* |
| 8234 | * Zero the differential counters of the dead processor |
| 8235 | * so that the vm statistics are consistent. |
| 8236 | * |
| 8237 | * This is only okay since the processor is dead and cannot |
| 8238 | * race with what we are doing. |
| 8239 | */ |
| 8240 | cpu_vm_stats_fold(cpu); |
Mel Gorman | 04f8cfe | 2021-06-28 19:42:15 -0700 | [diff] [blame] | 8241 | |
| 8242 | for_each_populated_zone(zone) |
| 8243 | zone_pcp_update(zone, 0); |
| 8244 | |
| 8245 | return 0; |
| 8246 | } |
| 8247 | |
| 8248 | static int page_alloc_cpu_online(unsigned int cpu) |
| 8249 | { |
| 8250 | struct zone *zone; |
| 8251 | |
| 8252 | for_each_populated_zone(zone) |
| 8253 | zone_pcp_update(zone, 1); |
Sebastian Andrzej Siewior | 005fd4b | 2016-11-03 15:50:02 +0100 | [diff] [blame] | 8254 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8255 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8256 | |
Nicholas Piggin | e03a512 | 2019-07-11 20:59:12 -0700 | [diff] [blame] | 8257 | #ifdef CONFIG_NUMA |
| 8258 | int hashdist = HASHDIST_DEFAULT; |
| 8259 | |
| 8260 | static int __init set_hashdist(char *str) |
| 8261 | { |
| 8262 | if (!str) |
| 8263 | return 0; |
| 8264 | hashdist = simple_strtoul(str, &str, 0); |
| 8265 | return 1; |
| 8266 | } |
| 8267 | __setup("hashdist=", set_hashdist); |
| 8268 | #endif |
| 8269 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8270 | void __init page_alloc_init(void) |
| 8271 | { |
Sebastian Andrzej Siewior | 005fd4b | 2016-11-03 15:50:02 +0100 | [diff] [blame] | 8272 | int ret; |
| 8273 | |
Nicholas Piggin | e03a512 | 2019-07-11 20:59:12 -0700 | [diff] [blame] | 8274 | #ifdef CONFIG_NUMA |
| 8275 | if (num_node_state(N_MEMORY) == 1) |
| 8276 | hashdist = 0; |
| 8277 | #endif |
| 8278 | |
Mel Gorman | 04f8cfe | 2021-06-28 19:42:15 -0700 | [diff] [blame] | 8279 | ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, |
| 8280 | "mm/page_alloc:pcp", |
| 8281 | page_alloc_cpu_online, |
Sebastian Andrzej Siewior | 005fd4b | 2016-11-03 15:50:02 +0100 | [diff] [blame] | 8282 | page_alloc_cpu_dead); |
| 8283 | WARN_ON(ret < 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8284 | } |
| 8285 | |
| 8286 | /* |
Yaowei Bai | 34b1006 | 2015-09-08 15:04:13 -0700 | [diff] [blame] | 8287 | * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio |
Hideo AOKI | cb45b0e | 2006-04-10 22:52:59 -0700 | [diff] [blame] | 8288 | * or min_free_kbytes changes. |
| 8289 | */ |
| 8290 | static void calculate_totalreserve_pages(void) |
| 8291 | { |
| 8292 | struct pglist_data *pgdat; |
| 8293 | unsigned long reserve_pages = 0; |
Christoph Lameter | 2f6726e | 2006-09-25 23:31:18 -0700 | [diff] [blame] | 8294 | enum zone_type i, j; |
Hideo AOKI | cb45b0e | 2006-04-10 22:52:59 -0700 | [diff] [blame] | 8295 | |
| 8296 | for_each_online_pgdat(pgdat) { |
Mel Gorman | 281e372 | 2016-07-28 15:46:11 -0700 | [diff] [blame] | 8297 | |
| 8298 | pgdat->totalreserve_pages = 0; |
| 8299 | |
Hideo AOKI | cb45b0e | 2006-04-10 22:52:59 -0700 | [diff] [blame] | 8300 | for (i = 0; i < MAX_NR_ZONES; i++) { |
| 8301 | struct zone *zone = pgdat->node_zones + i; |
Mel Gorman | 3484b2d | 2014-08-06 16:07:14 -0700 | [diff] [blame] | 8302 | long max = 0; |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 8303 | unsigned long managed_pages = zone_managed_pages(zone); |
Hideo AOKI | cb45b0e | 2006-04-10 22:52:59 -0700 | [diff] [blame] | 8304 | |
| 8305 | /* Find valid and maximum lowmem_reserve in the zone */ |
| 8306 | for (j = i; j < MAX_NR_ZONES; j++) { |
| 8307 | if (zone->lowmem_reserve[j] > max) |
| 8308 | max = zone->lowmem_reserve[j]; |
| 8309 | } |
| 8310 | |
Mel Gorman | 4185896 | 2009-06-16 15:32:12 -0700 | [diff] [blame] | 8311 | /* we treat the high watermark as reserved pages. */ |
| 8312 | max += high_wmark_pages(zone); |
Hideo AOKI | cb45b0e | 2006-04-10 22:52:59 -0700 | [diff] [blame] | 8313 | |
Arun KS | 3d6357d | 2018-12-28 00:34:20 -0800 | [diff] [blame] | 8314 | if (max > managed_pages) |
| 8315 | max = managed_pages; |
Johannes Weiner | a8d0143 | 2016-01-14 15:20:15 -0800 | [diff] [blame] | 8316 | |
Mel Gorman | 281e372 | 2016-07-28 15:46:11 -0700 | [diff] [blame] | 8317 | pgdat->totalreserve_pages += max; |
Johannes Weiner | a8d0143 | 2016-01-14 15:20:15 -0800 | [diff] [blame] | 8318 | |
Hideo AOKI | cb45b0e | 2006-04-10 22:52:59 -0700 | [diff] [blame] | 8319 | reserve_pages += max; |
| 8320 | } |
| 8321 | } |
| 8322 | totalreserve_pages = reserve_pages; |
| 8323 | } |
| 8324 | |
| 8325 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8326 | * setup_per_zone_lowmem_reserve - called whenever |
Yaowei Bai | 34b1006 | 2015-09-08 15:04:13 -0700 | [diff] [blame] | 8327 | * sysctl_lowmem_reserve_ratio changes. Ensures that each zone |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8328 | * has a correct pages reserved value, so an adequate number of |
| 8329 | * pages are left in the zone after a successful __alloc_pages(). |
| 8330 | */ |
| 8331 | static void setup_per_zone_lowmem_reserve(void) |
| 8332 | { |
| 8333 | struct pglist_data *pgdat; |
Lorenzo Stoakes | 470c61d | 2020-12-14 19:11:22 -0800 | [diff] [blame] | 8334 | enum zone_type i, j; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8335 | |
KAMEZAWA Hiroyuki | ec936fc | 2006-03-27 01:15:59 -0800 | [diff] [blame] | 8336 | for_each_online_pgdat(pgdat) { |
Lorenzo Stoakes | 470c61d | 2020-12-14 19:11:22 -0800 | [diff] [blame] | 8337 | for (i = 0; i < MAX_NR_ZONES - 1; i++) { |
| 8338 | struct zone *zone = &pgdat->node_zones[i]; |
| 8339 | int ratio = sysctl_lowmem_reserve_ratio[i]; |
| 8340 | bool clear = !ratio || !zone_managed_pages(zone); |
| 8341 | unsigned long managed_pages = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8342 | |
Lorenzo Stoakes | 470c61d | 2020-12-14 19:11:22 -0800 | [diff] [blame] | 8343 | for (j = i + 1; j < MAX_NR_ZONES; j++) { |
Liu Shixin | f7ec104 | 2021-06-28 19:42:33 -0700 | [diff] [blame] | 8344 | struct zone *upper_zone = &pgdat->node_zones[j]; |
Lorenzo Stoakes | 470c61d | 2020-12-14 19:11:22 -0800 | [diff] [blame] | 8345 | |
Liu Shixin | f7ec104 | 2021-06-28 19:42:33 -0700 | [diff] [blame] | 8346 | managed_pages += zone_managed_pages(upper_zone); |
| 8347 | |
| 8348 | if (clear) |
| 8349 | zone->lowmem_reserve[j] = 0; |
| 8350 | else |
Lorenzo Stoakes | 470c61d | 2020-12-14 19:11:22 -0800 | [diff] [blame] | 8351 | zone->lowmem_reserve[j] = managed_pages / ratio; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8352 | } |
| 8353 | } |
| 8354 | } |
Hideo AOKI | cb45b0e | 2006-04-10 22:52:59 -0700 | [diff] [blame] | 8355 | |
| 8356 | /* update totalreserve_pages */ |
| 8357 | calculate_totalreserve_pages(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8358 | } |
| 8359 | |
Mel Gorman | cfd3da1 | 2011-04-25 21:36:42 +0000 | [diff] [blame] | 8360 | static void __setup_per_zone_wmarks(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8361 | { |
| 8362 | unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); |
| 8363 | unsigned long lowmem_pages = 0; |
| 8364 | struct zone *zone; |
| 8365 | unsigned long flags; |
| 8366 | |
| 8367 | /* Calculate total number of !ZONE_HIGHMEM pages */ |
| 8368 | for_each_zone(zone) { |
| 8369 | if (!is_highmem(zone)) |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 8370 | lowmem_pages += zone_managed_pages(zone); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8371 | } |
| 8372 | |
| 8373 | for_each_zone(zone) { |
Andrew Morton | ac924c6 | 2006-05-15 09:43:59 -0700 | [diff] [blame] | 8374 | u64 tmp; |
| 8375 | |
Gerald Schaefer | 1125b4e | 2008-10-18 20:27:11 -0700 | [diff] [blame] | 8376 | spin_lock_irqsave(&zone->lock, flags); |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 8377 | tmp = (u64)pages_min * zone_managed_pages(zone); |
Andrew Morton | ac924c6 | 2006-05-15 09:43:59 -0700 | [diff] [blame] | 8378 | do_div(tmp, lowmem_pages); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8379 | if (is_highmem(zone)) { |
| 8380 | /* |
Nick Piggin | 669ed17 | 2005-11-13 16:06:45 -0800 | [diff] [blame] | 8381 | * __GFP_HIGH and PF_MEMALLOC allocations usually don't |
| 8382 | * need highmem pages, so cap pages_min to a small |
| 8383 | * value here. |
| 8384 | * |
Mel Gorman | 4185896 | 2009-06-16 15:32:12 -0700 | [diff] [blame] | 8385 | * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) |
Wei Yang | 8bb4e7a | 2019-03-05 15:46:22 -0800 | [diff] [blame] | 8386 | * deltas control async page reclaim, and so should |
Nick Piggin | 669ed17 | 2005-11-13 16:06:45 -0800 | [diff] [blame] | 8387 | * not be capped for highmem. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8388 | */ |
Andrew Morton | 90ae8d6 | 2013-02-22 16:32:22 -0800 | [diff] [blame] | 8389 | unsigned long min_pages; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8390 | |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 8391 | min_pages = zone_managed_pages(zone) / 1024; |
Andrew Morton | 90ae8d6 | 2013-02-22 16:32:22 -0800 | [diff] [blame] | 8392 | min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); |
Mel Gorman | a921444 | 2018-12-28 00:35:44 -0800 | [diff] [blame] | 8393 | zone->_watermark[WMARK_MIN] = min_pages; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8394 | } else { |
Nick Piggin | 669ed17 | 2005-11-13 16:06:45 -0800 | [diff] [blame] | 8395 | /* |
| 8396 | * If it's a lowmem zone, reserve a number of pages |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8397 | * proportionate to the zone's size. |
| 8398 | */ |
Mel Gorman | a921444 | 2018-12-28 00:35:44 -0800 | [diff] [blame] | 8399 | zone->_watermark[WMARK_MIN] = tmp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8400 | } |
| 8401 | |
Johannes Weiner | 795ae7a | 2016-03-17 14:19:14 -0700 | [diff] [blame] | 8402 | /* |
| 8403 | * Set the kswapd watermarks distance according to the |
| 8404 | * scale factor in proportion to available memory, but |
| 8405 | * ensure a minimum size on small systems. |
| 8406 | */ |
| 8407 | tmp = max_t(u64, tmp >> 2, |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 8408 | mult_frac(zone_managed_pages(zone), |
Johannes Weiner | 795ae7a | 2016-03-17 14:19:14 -0700 | [diff] [blame] | 8409 | watermark_scale_factor, 10000)); |
| 8410 | |
Charan Teja Reddy | aa09259 | 2020-06-03 15:59:14 -0700 | [diff] [blame] | 8411 | zone->watermark_boost = 0; |
Mel Gorman | a921444 | 2018-12-28 00:35:44 -0800 | [diff] [blame] | 8412 | zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; |
| 8413 | zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; |
Marek Szyprowski | 49f223a | 2012-01-25 12:49:24 +0100 | [diff] [blame] | 8414 | |
Gerald Schaefer | 1125b4e | 2008-10-18 20:27:11 -0700 | [diff] [blame] | 8415 | spin_unlock_irqrestore(&zone->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8416 | } |
Hideo AOKI | cb45b0e | 2006-04-10 22:52:59 -0700 | [diff] [blame] | 8417 | |
| 8418 | /* update totalreserve_pages */ |
| 8419 | calculate_totalreserve_pages(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8420 | } |
| 8421 | |
Mel Gorman | cfd3da1 | 2011-04-25 21:36:42 +0000 | [diff] [blame] | 8422 | /** |
| 8423 | * setup_per_zone_wmarks - called when min_free_kbytes changes |
| 8424 | * or when memory is hot-{added|removed} |
| 8425 | * |
| 8426 | * Ensures that the watermark[min,low,high] values for each zone are set |
| 8427 | * correctly with respect to min_free_kbytes. |
| 8428 | */ |
| 8429 | void setup_per_zone_wmarks(void) |
| 8430 | { |
Mel Gorman | b92ca18 | 2021-06-28 19:42:12 -0700 | [diff] [blame] | 8431 | struct zone *zone; |
Michal Hocko | b93e0f3 | 2017-09-06 16:20:37 -0700 | [diff] [blame] | 8432 | static DEFINE_SPINLOCK(lock); |
| 8433 | |
| 8434 | spin_lock(&lock); |
Mel Gorman | cfd3da1 | 2011-04-25 21:36:42 +0000 | [diff] [blame] | 8435 | __setup_per_zone_wmarks(); |
Michal Hocko | b93e0f3 | 2017-09-06 16:20:37 -0700 | [diff] [blame] | 8436 | spin_unlock(&lock); |
Mel Gorman | b92ca18 | 2021-06-28 19:42:12 -0700 | [diff] [blame] | 8437 | |
| 8438 | /* |
| 8439 | * The watermark size have changed so update the pcpu batch |
| 8440 | * and high limits or the limits may be inappropriate. |
| 8441 | */ |
| 8442 | for_each_zone(zone) |
Mel Gorman | 04f8cfe | 2021-06-28 19:42:15 -0700 | [diff] [blame] | 8443 | zone_pcp_update(zone, 0); |
Mel Gorman | cfd3da1 | 2011-04-25 21:36:42 +0000 | [diff] [blame] | 8444 | } |
| 8445 | |
Randy Dunlap | 55a4462 | 2009-09-21 17:01:20 -0700 | [diff] [blame] | 8446 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8447 | * Initialise min_free_kbytes. |
| 8448 | * |
| 8449 | * For small machines we want it small (128k min). For large machines |
Joel Savitz | 8beeae8 | 2020-07-03 15:15:30 -0700 | [diff] [blame] | 8450 | * we want it large (256MB max). But it is not linear, because network |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8451 | * bandwidth does not increase linearly with machine size. We use |
| 8452 | * |
Pintu Kumar | b8af294 | 2013-09-11 14:20:34 -0700 | [diff] [blame] | 8453 | * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8454 | * min_free_kbytes = sqrt(lowmem_kbytes * 16) |
| 8455 | * |
| 8456 | * which yields |
| 8457 | * |
| 8458 | * 16MB: 512k |
| 8459 | * 32MB: 724k |
| 8460 | * 64MB: 1024k |
| 8461 | * 128MB: 1448k |
| 8462 | * 256MB: 2048k |
| 8463 | * 512MB: 2896k |
| 8464 | * 1024MB: 4096k |
| 8465 | * 2048MB: 5792k |
| 8466 | * 4096MB: 8192k |
| 8467 | * 8192MB: 11584k |
| 8468 | * 16384MB: 16384k |
| 8469 | */ |
Liangcai Fan | bd3400e | 2021-11-05 13:41:36 -0700 | [diff] [blame] | 8470 | void calculate_min_free_kbytes(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8471 | { |
| 8472 | unsigned long lowmem_kbytes; |
Michal Hocko | 5f12733 | 2013-07-08 16:00:40 -0700 | [diff] [blame] | 8473 | int new_min_free_kbytes; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8474 | |
| 8475 | lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); |
Michal Hocko | 5f12733 | 2013-07-08 16:00:40 -0700 | [diff] [blame] | 8476 | new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8477 | |
Wang ShaoBo | 59d336b | 2021-11-05 13:40:55 -0700 | [diff] [blame] | 8478 | if (new_min_free_kbytes > user_min_free_kbytes) |
| 8479 | min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); |
| 8480 | else |
Michal Hocko | 5f12733 | 2013-07-08 16:00:40 -0700 | [diff] [blame] | 8481 | pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", |
| 8482 | new_min_free_kbytes, user_min_free_kbytes); |
Wang ShaoBo | 59d336b | 2021-11-05 13:40:55 -0700 | [diff] [blame] | 8483 | |
Liangcai Fan | bd3400e | 2021-11-05 13:41:36 -0700 | [diff] [blame] | 8484 | } |
| 8485 | |
| 8486 | int __meminit init_per_zone_wmark_min(void) |
| 8487 | { |
| 8488 | calculate_min_free_kbytes(); |
Minchan Kim | bc75d33 | 2009-06-16 15:32:48 -0700 | [diff] [blame] | 8489 | setup_per_zone_wmarks(); |
KOSAKI Motohiro | a6cccdc | 2011-05-24 17:11:33 -0700 | [diff] [blame] | 8490 | refresh_zone_stat_thresholds(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8491 | setup_per_zone_lowmem_reserve(); |
Joonsoo Kim | 6423aa8 | 2016-08-10 16:27:49 -0700 | [diff] [blame] | 8492 | |
| 8493 | #ifdef CONFIG_NUMA |
| 8494 | setup_min_unmapped_ratio(); |
| 8495 | setup_min_slab_ratio(); |
| 8496 | #endif |
| 8497 | |
Vijay Balakrishna | 4aab2be | 2020-10-10 23:16:40 -0700 | [diff] [blame] | 8498 | khugepaged_min_free_kbytes_update(); |
| 8499 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8500 | return 0; |
| 8501 | } |
Doug Berger | e08d3fd | 2020-08-20 17:42:24 -0700 | [diff] [blame] | 8502 | postcore_initcall(init_per_zone_wmark_min) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8503 | |
| 8504 | /* |
Pintu Kumar | b8af294 | 2013-09-11 14:20:34 -0700 | [diff] [blame] | 8505 | * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8506 | * that we can call two helper functions whenever min_free_kbytes |
| 8507 | * changes. |
| 8508 | */ |
Joe Perches | cccad5b | 2014-06-06 14:38:09 -0700 | [diff] [blame] | 8509 | int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, |
Christoph Hellwig | 3292739 | 2020-04-24 08:43:38 +0200 | [diff] [blame] | 8510 | void *buffer, size_t *length, loff_t *ppos) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8511 | { |
Han Pingtian | da8c757 | 2014-01-23 15:53:17 -0800 | [diff] [blame] | 8512 | int rc; |
| 8513 | |
| 8514 | rc = proc_dointvec_minmax(table, write, buffer, length, ppos); |
| 8515 | if (rc) |
| 8516 | return rc; |
| 8517 | |
Michal Hocko | 5f12733 | 2013-07-08 16:00:40 -0700 | [diff] [blame] | 8518 | if (write) { |
| 8519 | user_min_free_kbytes = min_free_kbytes; |
Minchan Kim | bc75d33 | 2009-06-16 15:32:48 -0700 | [diff] [blame] | 8520 | setup_per_zone_wmarks(); |
Michal Hocko | 5f12733 | 2013-07-08 16:00:40 -0700 | [diff] [blame] | 8521 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8522 | return 0; |
| 8523 | } |
| 8524 | |
Johannes Weiner | 795ae7a | 2016-03-17 14:19:14 -0700 | [diff] [blame] | 8525 | int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, |
Christoph Hellwig | 3292739 | 2020-04-24 08:43:38 +0200 | [diff] [blame] | 8526 | void *buffer, size_t *length, loff_t *ppos) |
Johannes Weiner | 795ae7a | 2016-03-17 14:19:14 -0700 | [diff] [blame] | 8527 | { |
| 8528 | int rc; |
| 8529 | |
| 8530 | rc = proc_dointvec_minmax(table, write, buffer, length, ppos); |
| 8531 | if (rc) |
| 8532 | return rc; |
| 8533 | |
| 8534 | if (write) |
| 8535 | setup_per_zone_wmarks(); |
| 8536 | |
| 8537 | return 0; |
| 8538 | } |
| 8539 | |
Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 8540 | #ifdef CONFIG_NUMA |
Joonsoo Kim | 6423aa8 | 2016-08-10 16:27:49 -0700 | [diff] [blame] | 8541 | static void setup_min_unmapped_ratio(void) |
Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 8542 | { |
Joonsoo Kim | 6423aa8 | 2016-08-10 16:27:49 -0700 | [diff] [blame] | 8543 | pg_data_t *pgdat; |
Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 8544 | struct zone *zone; |
Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 8545 | |
Mel Gorman | a5f5f91 | 2016-07-28 15:46:32 -0700 | [diff] [blame] | 8546 | for_each_online_pgdat(pgdat) |
Joonsoo Kim | 81cbcbc | 2016-08-10 16:27:46 -0700 | [diff] [blame] | 8547 | pgdat->min_unmapped_pages = 0; |
Mel Gorman | a5f5f91 | 2016-07-28 15:46:32 -0700 | [diff] [blame] | 8548 | |
Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 8549 | for_each_zone(zone) |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 8550 | zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * |
| 8551 | sysctl_min_unmapped_ratio) / 100; |
Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 8552 | } |
Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 8553 | |
Joonsoo Kim | 6423aa8 | 2016-08-10 16:27:49 -0700 | [diff] [blame] | 8554 | |
| 8555 | int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, |
Christoph Hellwig | 3292739 | 2020-04-24 08:43:38 +0200 | [diff] [blame] | 8556 | void *buffer, size_t *length, loff_t *ppos) |
Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 8557 | { |
Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 8558 | int rc; |
| 8559 | |
Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 8560 | rc = proc_dointvec_minmax(table, write, buffer, length, ppos); |
Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 8561 | if (rc) |
| 8562 | return rc; |
| 8563 | |
Joonsoo Kim | 6423aa8 | 2016-08-10 16:27:49 -0700 | [diff] [blame] | 8564 | setup_min_unmapped_ratio(); |
| 8565 | |
| 8566 | return 0; |
| 8567 | } |
| 8568 | |
| 8569 | static void setup_min_slab_ratio(void) |
| 8570 | { |
| 8571 | pg_data_t *pgdat; |
| 8572 | struct zone *zone; |
| 8573 | |
Mel Gorman | a5f5f91 | 2016-07-28 15:46:32 -0700 | [diff] [blame] | 8574 | for_each_online_pgdat(pgdat) |
| 8575 | pgdat->min_slab_pages = 0; |
| 8576 | |
Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 8577 | for_each_zone(zone) |
Arun KS | 9705bea | 2018-12-28 00:34:24 -0800 | [diff] [blame] | 8578 | zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * |
| 8579 | sysctl_min_slab_ratio) / 100; |
Joonsoo Kim | 6423aa8 | 2016-08-10 16:27:49 -0700 | [diff] [blame] | 8580 | } |
| 8581 | |
| 8582 | int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, |
Christoph Hellwig | 3292739 | 2020-04-24 08:43:38 +0200 | [diff] [blame] | 8583 | void *buffer, size_t *length, loff_t *ppos) |
Joonsoo Kim | 6423aa8 | 2016-08-10 16:27:49 -0700 | [diff] [blame] | 8584 | { |
| 8585 | int rc; |
| 8586 | |
| 8587 | rc = proc_dointvec_minmax(table, write, buffer, length, ppos); |
| 8588 | if (rc) |
| 8589 | return rc; |
| 8590 | |
| 8591 | setup_min_slab_ratio(); |
| 8592 | |
Christoph Lameter | 0ff3849 | 2006-09-25 23:31:52 -0700 | [diff] [blame] | 8593 | return 0; |
| 8594 | } |
Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 8595 | #endif |
| 8596 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8597 | /* |
| 8598 | * lowmem_reserve_ratio_sysctl_handler - just a wrapper around |
| 8599 | * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() |
| 8600 | * whenever sysctl_lowmem_reserve_ratio changes. |
| 8601 | * |
| 8602 | * The reserve ratio obviously has absolutely no relation with the |
Mel Gorman | 4185896 | 2009-06-16 15:32:12 -0700 | [diff] [blame] | 8603 | * minimum watermarks. The lowmem reserve ratio can only make sense |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8604 | * if in function of the boot time zone sizes. |
| 8605 | */ |
Joe Perches | cccad5b | 2014-06-06 14:38:09 -0700 | [diff] [blame] | 8606 | int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, |
Christoph Hellwig | 3292739 | 2020-04-24 08:43:38 +0200 | [diff] [blame] | 8607 | void *buffer, size_t *length, loff_t *ppos) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8608 | { |
Baoquan He | 86aaf25 | 2020-06-03 15:58:48 -0700 | [diff] [blame] | 8609 | int i; |
| 8610 | |
Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 8611 | proc_dointvec_minmax(table, write, buffer, length, ppos); |
Baoquan He | 86aaf25 | 2020-06-03 15:58:48 -0700 | [diff] [blame] | 8612 | |
| 8613 | for (i = 0; i < MAX_NR_ZONES; i++) { |
| 8614 | if (sysctl_lowmem_reserve_ratio[i] < 1) |
| 8615 | sysctl_lowmem_reserve_ratio[i] = 0; |
| 8616 | } |
| 8617 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8618 | setup_per_zone_lowmem_reserve(); |
| 8619 | return 0; |
| 8620 | } |
| 8621 | |
Rohit Seth | 8ad4b1f | 2006-01-08 01:00:40 -0800 | [diff] [blame] | 8622 | /* |
Mel Gorman | 74f4482 | 2021-06-28 19:42:24 -0700 | [diff] [blame] | 8623 | * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each |
| 8624 | * cpu. It is the fraction of total pages in each zone that a hot per cpu |
Pintu Kumar | b8af294 | 2013-09-11 14:20:34 -0700 | [diff] [blame] | 8625 | * pagelist can have before it gets flushed back to buddy allocator. |
Rohit Seth | 8ad4b1f | 2006-01-08 01:00:40 -0800 | [diff] [blame] | 8626 | */ |
Mel Gorman | 74f4482 | 2021-06-28 19:42:24 -0700 | [diff] [blame] | 8627 | int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table, |
| 8628 | int write, void *buffer, size_t *length, loff_t *ppos) |
Rohit Seth | 8ad4b1f | 2006-01-08 01:00:40 -0800 | [diff] [blame] | 8629 | { |
| 8630 | struct zone *zone; |
Mel Gorman | 74f4482 | 2021-06-28 19:42:24 -0700 | [diff] [blame] | 8631 | int old_percpu_pagelist_high_fraction; |
Rohit Seth | 8ad4b1f | 2006-01-08 01:00:40 -0800 | [diff] [blame] | 8632 | int ret; |
| 8633 | |
Cody P Schafer | c8e251f | 2013-07-03 15:01:29 -0700 | [diff] [blame] | 8634 | mutex_lock(&pcp_batch_high_lock); |
Mel Gorman | 74f4482 | 2021-06-28 19:42:24 -0700 | [diff] [blame] | 8635 | old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; |
David Rientjes | 7cd2b0a | 2014-06-23 13:22:04 -0700 | [diff] [blame] | 8636 | |
| 8637 | ret = proc_dointvec_minmax(table, write, buffer, length, ppos); |
| 8638 | if (!write || ret < 0) |
| 8639 | goto out; |
| 8640 | |
| 8641 | /* Sanity checking to avoid pcp imbalance */ |
Mel Gorman | 74f4482 | 2021-06-28 19:42:24 -0700 | [diff] [blame] | 8642 | if (percpu_pagelist_high_fraction && |
| 8643 | percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { |
| 8644 | percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; |
David Rientjes | 7cd2b0a | 2014-06-23 13:22:04 -0700 | [diff] [blame] | 8645 | ret = -EINVAL; |
| 8646 | goto out; |
Rohit Seth | 8ad4b1f | 2006-01-08 01:00:40 -0800 | [diff] [blame] | 8647 | } |
David Rientjes | 7cd2b0a | 2014-06-23 13:22:04 -0700 | [diff] [blame] | 8648 | |
| 8649 | /* No change? */ |
Mel Gorman | 74f4482 | 2021-06-28 19:42:24 -0700 | [diff] [blame] | 8650 | if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) |
David Rientjes | 7cd2b0a | 2014-06-23 13:22:04 -0700 | [diff] [blame] | 8651 | goto out; |
| 8652 | |
Mel Gorman | cb1ef53 | 2019-11-30 17:55:11 -0800 | [diff] [blame] | 8653 | for_each_populated_zone(zone) |
Mel Gorman | 74f4482 | 2021-06-28 19:42:24 -0700 | [diff] [blame] | 8654 | zone_set_pageset_high_and_batch(zone, 0); |
David Rientjes | 7cd2b0a | 2014-06-23 13:22:04 -0700 | [diff] [blame] | 8655 | out: |
Cody P Schafer | c8e251f | 2013-07-03 15:01:29 -0700 | [diff] [blame] | 8656 | mutex_unlock(&pcp_batch_high_lock); |
David Rientjes | 7cd2b0a | 2014-06-23 13:22:04 -0700 | [diff] [blame] | 8657 | return ret; |
Rohit Seth | 8ad4b1f | 2006-01-08 01:00:40 -0800 | [diff] [blame] | 8658 | } |
| 8659 | |
Srikar Dronamraju | f6f34b4 | 2016-10-07 16:59:15 -0700 | [diff] [blame] | 8660 | #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES |
| 8661 | /* |
| 8662 | * Returns the number of pages that arch has reserved but |
| 8663 | * is not known to alloc_large_system_hash(). |
| 8664 | */ |
| 8665 | static unsigned long __init arch_reserved_kernel_pages(void) |
| 8666 | { |
| 8667 | return 0; |
| 8668 | } |
| 8669 | #endif |
| 8670 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8671 | /* |
Pavel Tatashin | 9017217 | 2017-07-06 15:39:14 -0700 | [diff] [blame] | 8672 | * Adaptive scale is meant to reduce sizes of hash tables on large memory |
| 8673 | * machines. As memory size is increased the scale is also increased but at |
| 8674 | * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory |
| 8675 | * quadruples the scale is increased by one, which means the size of hash table |
| 8676 | * only doubles, instead of quadrupling as well. |
| 8677 | * Because 32-bit systems cannot have large physical memory, where this scaling |
| 8678 | * makes sense, it is disabled on such platforms. |
| 8679 | */ |
| 8680 | #if __BITS_PER_LONG > 32 |
| 8681 | #define ADAPT_SCALE_BASE (64ul << 30) |
| 8682 | #define ADAPT_SCALE_SHIFT 2 |
| 8683 | #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT) |
| 8684 | #endif |
| 8685 | |
| 8686 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8687 | * allocate a large system hash table from bootmem |
| 8688 | * - it is assumed that the hash table must contain an exact power-of-2 |
| 8689 | * quantity of entries |
| 8690 | * - limit is the number of hash buckets, not the total allocation size |
| 8691 | */ |
| 8692 | void *__init alloc_large_system_hash(const char *tablename, |
| 8693 | unsigned long bucketsize, |
| 8694 | unsigned long numentries, |
| 8695 | int scale, |
| 8696 | int flags, |
| 8697 | unsigned int *_hash_shift, |
| 8698 | unsigned int *_hash_mask, |
Tim Bird | 31fe62b | 2012-05-23 13:33:35 +0000 | [diff] [blame] | 8699 | unsigned long low_limit, |
| 8700 | unsigned long high_limit) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8701 | { |
Tim Bird | 31fe62b | 2012-05-23 13:33:35 +0000 | [diff] [blame] | 8702 | unsigned long long max = high_limit; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8703 | unsigned long log2qty, size; |
| 8704 | void *table = NULL; |
Pavel Tatashin | 3749a8f | 2017-07-06 15:39:08 -0700 | [diff] [blame] | 8705 | gfp_t gfp_flags; |
Nicholas Piggin | ec11408 | 2019-07-11 20:59:09 -0700 | [diff] [blame] | 8706 | bool virt; |
Nicholas Piggin | 121e6f3 | 2021-04-29 22:58:49 -0700 | [diff] [blame] | 8707 | bool huge; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8708 | |
| 8709 | /* allow the kernel cmdline to have a say */ |
| 8710 | if (!numentries) { |
| 8711 | /* round applicable memory size up to nearest megabyte */ |
Andrew Morton | 0490366 | 2006-12-06 20:37:33 -0800 | [diff] [blame] | 8712 | numentries = nr_kernel_pages; |
Srikar Dronamraju | f6f34b4 | 2016-10-07 16:59:15 -0700 | [diff] [blame] | 8713 | numentries -= arch_reserved_kernel_pages(); |
Jerry Zhou | a7e8331 | 2013-09-11 14:20:26 -0700 | [diff] [blame] | 8714 | |
| 8715 | /* It isn't necessary when PAGE_SIZE >= 1MB */ |
| 8716 | if (PAGE_SHIFT < 20) |
| 8717 | numentries = round_up(numentries, (1<<20)/PAGE_SIZE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8718 | |
Pavel Tatashin | 9017217 | 2017-07-06 15:39:14 -0700 | [diff] [blame] | 8719 | #if __BITS_PER_LONG > 32 |
| 8720 | if (!high_limit) { |
| 8721 | unsigned long adapt; |
| 8722 | |
| 8723 | for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries; |
| 8724 | adapt <<= ADAPT_SCALE_SHIFT) |
| 8725 | scale++; |
| 8726 | } |
| 8727 | #endif |
| 8728 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8729 | /* limit to 1 bucket per 2^scale bytes of low memory */ |
| 8730 | if (scale > PAGE_SHIFT) |
| 8731 | numentries >>= (scale - PAGE_SHIFT); |
| 8732 | else |
| 8733 | numentries <<= (PAGE_SHIFT - scale); |
Paul Mundt | 9ab37b8 | 2007-01-05 16:36:30 -0800 | [diff] [blame] | 8734 | |
| 8735 | /* Make sure we've got at least a 0-order allocation.. */ |
Jan Beulich | 2c85f51 | 2009-09-21 17:03:07 -0700 | [diff] [blame] | 8736 | if (unlikely(flags & HASH_SMALL)) { |
| 8737 | /* Makes no sense without HASH_EARLY */ |
| 8738 | WARN_ON(!(flags & HASH_EARLY)); |
| 8739 | if (!(numentries >> *_hash_shift)) { |
| 8740 | numentries = 1UL << *_hash_shift; |
| 8741 | BUG_ON(!numentries); |
| 8742 | } |
| 8743 | } else if (unlikely((numentries * bucketsize) < PAGE_SIZE)) |
Paul Mundt | 9ab37b8 | 2007-01-05 16:36:30 -0800 | [diff] [blame] | 8744 | numentries = PAGE_SIZE / bucketsize; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8745 | } |
John Hawkes | 6e692ed | 2006-03-25 03:08:02 -0800 | [diff] [blame] | 8746 | numentries = roundup_pow_of_two(numentries); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8747 | |
| 8748 | /* limit allocation size to 1/16 total memory by default */ |
| 8749 | if (max == 0) { |
| 8750 | max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; |
| 8751 | do_div(max, bucketsize); |
| 8752 | } |
Dimitri Sivanich | 074b851 | 2012-02-08 12:39:07 -0800 | [diff] [blame] | 8753 | max = min(max, 0x80000000ULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8754 | |
Tim Bird | 31fe62b | 2012-05-23 13:33:35 +0000 | [diff] [blame] | 8755 | if (numentries < low_limit) |
| 8756 | numentries = low_limit; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8757 | if (numentries > max) |
| 8758 | numentries = max; |
| 8759 | |
David Howells | f0d1b0b | 2006-12-08 02:37:49 -0800 | [diff] [blame] | 8760 | log2qty = ilog2(numentries); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8761 | |
Pavel Tatashin | 3749a8f | 2017-07-06 15:39:08 -0700 | [diff] [blame] | 8762 | gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8763 | do { |
Nicholas Piggin | ec11408 | 2019-07-11 20:59:09 -0700 | [diff] [blame] | 8764 | virt = false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8765 | size = bucketsize << log2qty; |
Pavel Tatashin | ea1f5f3 | 2017-11-15 17:36:27 -0800 | [diff] [blame] | 8766 | if (flags & HASH_EARLY) { |
| 8767 | if (flags & HASH_ZERO) |
Mike Rapoport | 26fb3da | 2019-03-11 23:30:42 -0700 | [diff] [blame] | 8768 | table = memblock_alloc(size, SMP_CACHE_BYTES); |
Pavel Tatashin | ea1f5f3 | 2017-11-15 17:36:27 -0800 | [diff] [blame] | 8769 | else |
Mike Rapoport | 7e1c4e2 | 2018-10-30 15:09:57 -0700 | [diff] [blame] | 8770 | table = memblock_alloc_raw(size, |
| 8771 | SMP_CACHE_BYTES); |
Nicholas Piggin | ec11408 | 2019-07-11 20:59:09 -0700 | [diff] [blame] | 8772 | } else if (get_order(size) >= MAX_ORDER || hashdist) { |
Christoph Hellwig | 88dca4c | 2020-06-01 21:51:40 -0700 | [diff] [blame] | 8773 | table = __vmalloc(size, gfp_flags); |
Nicholas Piggin | ec11408 | 2019-07-11 20:59:09 -0700 | [diff] [blame] | 8774 | virt = true; |
Eric Dumazet | 084f7e2 | 2021-11-05 13:39:59 -0700 | [diff] [blame] | 8775 | if (table) |
| 8776 | huge = is_vm_area_hugepages(table); |
Pavel Tatashin | ea1f5f3 | 2017-11-15 17:36:27 -0800 | [diff] [blame] | 8777 | } else { |
Eric Dumazet | 1037b83 | 2007-07-15 23:38:05 -0700 | [diff] [blame] | 8778 | /* |
| 8779 | * If bucketsize is not a power-of-two, we may free |
Mel Gorman | a1dd268 | 2009-06-16 15:32:19 -0700 | [diff] [blame] | 8780 | * some pages at the end of hash table which |
| 8781 | * alloc_pages_exact() automatically does |
Eric Dumazet | 1037b83 | 2007-07-15 23:38:05 -0700 | [diff] [blame] | 8782 | */ |
Nicholas Piggin | ec11408 | 2019-07-11 20:59:09 -0700 | [diff] [blame] | 8783 | table = alloc_pages_exact(size, gfp_flags); |
| 8784 | kmemleak_alloc(table, size, 1, gfp_flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8785 | } |
| 8786 | } while (!table && size > PAGE_SIZE && --log2qty); |
| 8787 | |
| 8788 | if (!table) |
| 8789 | panic("Failed to allocate %s hash table\n", tablename); |
| 8790 | |
Nicholas Piggin | ec11408 | 2019-07-11 20:59:09 -0700 | [diff] [blame] | 8791 | pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", |
| 8792 | tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, |
Nicholas Piggin | 121e6f3 | 2021-04-29 22:58:49 -0700 | [diff] [blame] | 8793 | virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8794 | |
| 8795 | if (_hash_shift) |
| 8796 | *_hash_shift = log2qty; |
| 8797 | if (_hash_mask) |
| 8798 | *_hash_mask = (1 << log2qty) - 1; |
| 8799 | |
| 8800 | return table; |
| 8801 | } |
KAMEZAWA Hiroyuki | a117e66 | 2006-03-27 01:15:25 -0800 | [diff] [blame] | 8802 | |
KAMEZAWA Hiroyuki | a5d76b54 | 2007-10-16 01:26:11 -0700 | [diff] [blame] | 8803 | /* |
Minchan Kim | 8093451 | 2012-07-31 16:43:01 -0700 | [diff] [blame] | 8804 | * This function checks whether pageblock includes unmovable pages or not. |
Minchan Kim | 8093451 | 2012-07-31 16:43:01 -0700 | [diff] [blame] | 8805 | * |
Pintu Kumar | b8af294 | 2013-09-11 14:20:34 -0700 | [diff] [blame] | 8806 | * PageLRU check without isolation or lru_lock could race so that |
Yisheng Xie | 0efadf4 | 2017-02-24 14:57:39 -0800 | [diff] [blame] | 8807 | * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable |
| 8808 | * check without lock_page also may miss some movable non-lru pages at |
| 8809 | * race condition. So you can't expect this function should be exact. |
Qian Cai | 4a55c04 | 2020-01-30 22:14:57 -0800 | [diff] [blame] | 8810 | * |
| 8811 | * Returns a page without holding a reference. If the caller wants to |
Randy Dunlap | 047b996 | 2020-08-11 18:33:14 -0700 | [diff] [blame] | 8812 | * dereference that page (e.g., dumping), it has to make sure that it |
Qian Cai | 4a55c04 | 2020-01-30 22:14:57 -0800 | [diff] [blame] | 8813 | * cannot get removed (e.g., via memory unplug) concurrently. |
| 8814 | * |
KAMEZAWA Hiroyuki | a5d76b54 | 2007-10-16 01:26:11 -0700 | [diff] [blame] | 8815 | */ |
Qian Cai | 4a55c04 | 2020-01-30 22:14:57 -0800 | [diff] [blame] | 8816 | struct page *has_unmovable_pages(struct zone *zone, struct page *page, |
| 8817 | int migratetype, int flags) |
KAMEZAWA Hiroyuki | 49ac825 | 2010-10-26 14:21:30 -0700 | [diff] [blame] | 8818 | { |
Qian Cai | 1a9f2191 | 2019-04-18 17:50:30 -0700 | [diff] [blame] | 8819 | unsigned long iter = 0; |
| 8820 | unsigned long pfn = page_to_pfn(page); |
Li Xinhai | 6a654e3 | 2020-10-13 16:55:39 -0700 | [diff] [blame] | 8821 | unsigned long offset = pfn % pageblock_nr_pages; |
Michal Nazarewicz | 47118af | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 8822 | |
Qian Cai | 1a9f2191 | 2019-04-18 17:50:30 -0700 | [diff] [blame] | 8823 | if (is_migrate_cma_page(page)) { |
| 8824 | /* |
| 8825 | * CMA allocations (alloc_contig_range) really need to mark |
| 8826 | * isolate CMA pageblocks even when they are not movable in fact |
| 8827 | * so consider them movable here. |
| 8828 | */ |
| 8829 | if (is_migrate_cma(migratetype)) |
Qian Cai | 4a55c04 | 2020-01-30 22:14:57 -0800 | [diff] [blame] | 8830 | return NULL; |
Michal Hocko | 4da2ce2 | 2017-11-15 17:33:26 -0800 | [diff] [blame] | 8831 | |
Qian Cai | 3d680bd | 2020-01-30 22:15:01 -0800 | [diff] [blame] | 8832 | return page; |
Qian Cai | 1a9f2191 | 2019-04-18 17:50:30 -0700 | [diff] [blame] | 8833 | } |
| 8834 | |
Li Xinhai | 6a654e3 | 2020-10-13 16:55:39 -0700 | [diff] [blame] | 8835 | for (; iter < pageblock_nr_pages - offset; iter++) { |
David Hildenbrand | fe4c86c | 2020-01-30 22:14:04 -0800 | [diff] [blame] | 8836 | page = pfn_to_page(pfn + iter); |
Naoya Horiguchi | c8721bb | 2013-09-11 14:22:09 -0700 | [diff] [blame] | 8837 | |
David Hildenbrand | c9c510d | 2020-10-13 16:55:17 -0700 | [diff] [blame] | 8838 | /* |
| 8839 | * Both, bootmem allocations and memory holes are marked |
| 8840 | * PG_reserved and are unmovable. We can even have unmovable |
| 8841 | * allocations inside ZONE_MOVABLE, for example when |
| 8842 | * specifying "movablecore". |
| 8843 | */ |
Michal Hocko | d7ab367 | 2017-11-15 17:33:30 -0800 | [diff] [blame] | 8844 | if (PageReserved(page)) |
Qian Cai | 3d680bd | 2020-01-30 22:15:01 -0800 | [diff] [blame] | 8845 | return page; |
Michal Hocko | d7ab367 | 2017-11-15 17:33:30 -0800 | [diff] [blame] | 8846 | |
Naoya Horiguchi | c8721bb | 2013-09-11 14:22:09 -0700 | [diff] [blame] | 8847 | /* |
Michal Hocko | 9d78999 | 2018-11-16 15:08:15 -0800 | [diff] [blame] | 8848 | * If the zone is movable and we have ruled out all reserved |
| 8849 | * pages then it should be reasonably safe to assume the rest |
| 8850 | * is movable. |
| 8851 | */ |
| 8852 | if (zone_idx(zone) == ZONE_MOVABLE) |
| 8853 | continue; |
| 8854 | |
| 8855 | /* |
Naoya Horiguchi | c8721bb | 2013-09-11 14:22:09 -0700 | [diff] [blame] | 8856 | * Hugepages are not in LRU lists, but they're movable. |
Rik van Riel | 1da2f32 | 2020-04-01 21:10:31 -0700 | [diff] [blame] | 8857 | * THPs are on the LRU, but need to be counted as #small pages. |
Wei Yang | 8bb4e7a | 2019-03-05 15:46:22 -0800 | [diff] [blame] | 8858 | * We need not scan over tail pages because we don't |
Naoya Horiguchi | c8721bb | 2013-09-11 14:22:09 -0700 | [diff] [blame] | 8859 | * handle each tail page individually in migration. |
| 8860 | */ |
Rik van Riel | 1da2f32 | 2020-04-01 21:10:31 -0700 | [diff] [blame] | 8861 | if (PageHuge(page) || PageTransCompound(page)) { |
Oscar Salvador | 17e2e7d | 2018-12-21 14:31:00 -0800 | [diff] [blame] | 8862 | struct page *head = compound_head(page); |
| 8863 | unsigned int skip_pages; |
Aneesh Kumar K.V | 464c7ff | 2018-09-04 15:45:59 -0700 | [diff] [blame] | 8864 | |
Rik van Riel | 1da2f32 | 2020-04-01 21:10:31 -0700 | [diff] [blame] | 8865 | if (PageHuge(page)) { |
| 8866 | if (!hugepage_migration_supported(page_hstate(head))) |
| 8867 | return page; |
| 8868 | } else if (!PageLRU(head) && !__PageMovable(head)) { |
Qian Cai | 3d680bd | 2020-01-30 22:15:01 -0800 | [diff] [blame] | 8869 | return page; |
Rik van Riel | 1da2f32 | 2020-04-01 21:10:31 -0700 | [diff] [blame] | 8870 | } |
Aneesh Kumar K.V | 464c7ff | 2018-09-04 15:45:59 -0700 | [diff] [blame] | 8871 | |
Matthew Wilcox (Oracle) | d8c6546 | 2019-09-23 15:34:30 -0700 | [diff] [blame] | 8872 | skip_pages = compound_nr(head) - (page - head); |
Oscar Salvador | 17e2e7d | 2018-12-21 14:31:00 -0800 | [diff] [blame] | 8873 | iter += skip_pages - 1; |
Naoya Horiguchi | c8721bb | 2013-09-11 14:22:09 -0700 | [diff] [blame] | 8874 | continue; |
| 8875 | } |
| 8876 | |
Minchan Kim | 97d255c | 2012-07-31 16:42:59 -0700 | [diff] [blame] | 8877 | /* |
| 8878 | * We can't use page_count without pin a page |
| 8879 | * because another CPU can free compound page. |
| 8880 | * This check already skips compound tails of THP |
Joonsoo Kim | 0139aa7 | 2016-05-19 17:10:49 -0700 | [diff] [blame] | 8881 | * because their page->_refcount is zero at all time. |
Minchan Kim | 97d255c | 2012-07-31 16:42:59 -0700 | [diff] [blame] | 8882 | */ |
Joonsoo Kim | fe896d1 | 2016-03-17 14:19:26 -0700 | [diff] [blame] | 8883 | if (!page_ref_count(page)) { |
KAMEZAWA Hiroyuki | 49ac825 | 2010-10-26 14:21:30 -0700 | [diff] [blame] | 8884 | if (PageBuddy(page)) |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 8885 | iter += (1 << buddy_order(page)) - 1; |
KAMEZAWA Hiroyuki | 49ac825 | 2010-10-26 14:21:30 -0700 | [diff] [blame] | 8886 | continue; |
| 8887 | } |
Minchan Kim | 97d255c | 2012-07-31 16:42:59 -0700 | [diff] [blame] | 8888 | |
Wen Congyang | b023f46 | 2012-12-11 16:00:45 -0800 | [diff] [blame] | 8889 | /* |
| 8890 | * The HWPoisoned page may be not in buddy system, and |
| 8891 | * page_count() is not 0. |
| 8892 | */ |
David Hildenbrand | 756d25b | 2019-11-30 17:54:07 -0800 | [diff] [blame] | 8893 | if ((flags & MEMORY_OFFLINE) && PageHWPoison(page)) |
Wen Congyang | b023f46 | 2012-12-11 16:00:45 -0800 | [diff] [blame] | 8894 | continue; |
| 8895 | |
David Hildenbrand | aa21879 | 2020-05-07 16:01:30 +0200 | [diff] [blame] | 8896 | /* |
| 8897 | * We treat all PageOffline() pages as movable when offlining |
| 8898 | * to give drivers a chance to decrement their reference count |
| 8899 | * in MEM_GOING_OFFLINE in order to indicate that these pages |
| 8900 | * can be offlined as there are no direct references anymore. |
| 8901 | * For actually unmovable PageOffline() where the driver does |
| 8902 | * not support this, we will fail later when trying to actually |
| 8903 | * move these pages that still have a reference count > 0. |
| 8904 | * (false negatives in this function only) |
| 8905 | */ |
| 8906 | if ((flags & MEMORY_OFFLINE) && PageOffline(page)) |
| 8907 | continue; |
| 8908 | |
David Hildenbrand | fe4c86c | 2020-01-30 22:14:04 -0800 | [diff] [blame] | 8909 | if (__PageMovable(page) || PageLRU(page)) |
Yisheng Xie | 0efadf4 | 2017-02-24 14:57:39 -0800 | [diff] [blame] | 8910 | continue; |
| 8911 | |
KAMEZAWA Hiroyuki | 49ac825 | 2010-10-26 14:21:30 -0700 | [diff] [blame] | 8912 | /* |
Johannes Weiner | 6b4f779 | 2014-12-12 16:56:13 -0800 | [diff] [blame] | 8913 | * If there are RECLAIMABLE pages, we need to check |
| 8914 | * it. But now, memory offline itself doesn't call |
| 8915 | * shrink_node_slabs() and it still to be fixed. |
KAMEZAWA Hiroyuki | 49ac825 | 2010-10-26 14:21:30 -0700 | [diff] [blame] | 8916 | */ |
Qian Cai | 3d680bd | 2020-01-30 22:15:01 -0800 | [diff] [blame] | 8917 | return page; |
KAMEZAWA Hiroyuki | 49ac825 | 2010-10-26 14:21:30 -0700 | [diff] [blame] | 8918 | } |
Qian Cai | 4a55c04 | 2020-01-30 22:14:57 -0800 | [diff] [blame] | 8919 | return NULL; |
KAMEZAWA Hiroyuki | 49ac825 | 2010-10-26 14:21:30 -0700 | [diff] [blame] | 8920 | } |
| 8921 | |
Alexandre Ghiti | 8df995f | 2019-05-13 17:19:00 -0700 | [diff] [blame] | 8922 | #ifdef CONFIG_CONTIG_ALLOC |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 8923 | static unsigned long pfn_max_align_down(unsigned long pfn) |
| 8924 | { |
| 8925 | return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES, |
| 8926 | pageblock_nr_pages) - 1); |
| 8927 | } |
| 8928 | |
| 8929 | static unsigned long pfn_max_align_up(unsigned long pfn) |
| 8930 | { |
| 8931 | return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES, |
| 8932 | pageblock_nr_pages)); |
| 8933 | } |
| 8934 | |
Minchan Kim | a1394bd | 2021-04-29 23:01:30 -0700 | [diff] [blame] | 8935 | #if defined(CONFIG_DYNAMIC_DEBUG) || \ |
| 8936 | (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) |
| 8937 | /* Usage: See admin-guide/dynamic-debug-howto.rst */ |
| 8938 | static void alloc_contig_dump_pages(struct list_head *page_list) |
| 8939 | { |
| 8940 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); |
| 8941 | |
| 8942 | if (DYNAMIC_DEBUG_BRANCH(descriptor)) { |
| 8943 | struct page *page; |
| 8944 | |
| 8945 | dump_stack(); |
| 8946 | list_for_each_entry(page, page_list, lru) |
| 8947 | dump_page(page, "migration failure"); |
| 8948 | } |
| 8949 | } |
| 8950 | #else |
| 8951 | static inline void alloc_contig_dump_pages(struct list_head *page_list) |
| 8952 | { |
| 8953 | } |
| 8954 | #endif |
| 8955 | |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 8956 | /* [start, end) must belong to a single zone. */ |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 8957 | static int __alloc_contig_migrate_range(struct compact_control *cc, |
| 8958 | unsigned long start, unsigned long end) |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 8959 | { |
| 8960 | /* This function is based on compact_zone() from compaction.c. */ |
Maninder Singh | 730ec8c | 2020-06-03 16:01:18 -0700 | [diff] [blame] | 8961 | unsigned int nr_reclaimed; |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 8962 | unsigned long pfn = start; |
| 8963 | unsigned int tries = 0; |
| 8964 | int ret = 0; |
Joonsoo Kim | 8b94e0b | 2020-08-11 18:37:31 -0700 | [diff] [blame] | 8965 | struct migration_target_control mtc = { |
| 8966 | .nid = zone_to_nid(cc->zone), |
| 8967 | .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, |
| 8968 | }; |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 8969 | |
Minchan Kim | 361a2a2 | 2021-05-04 18:36:57 -0700 | [diff] [blame] | 8970 | lru_cache_disable(); |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 8971 | |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 8972 | while (pfn < end || !list_empty(&cc->migratepages)) { |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 8973 | if (fatal_signal_pending(current)) { |
| 8974 | ret = -EINTR; |
| 8975 | break; |
| 8976 | } |
| 8977 | |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 8978 | if (list_empty(&cc->migratepages)) { |
| 8979 | cc->nr_migratepages = 0; |
Oscar Salvador | c2ad7a1 | 2021-05-04 18:35:17 -0700 | [diff] [blame] | 8980 | ret = isolate_migratepages_range(cc, pfn, end); |
| 8981 | if (ret && ret != -EAGAIN) |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 8982 | break; |
Oscar Salvador | c2ad7a1 | 2021-05-04 18:35:17 -0700 | [diff] [blame] | 8983 | pfn = cc->migrate_pfn; |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 8984 | tries = 0; |
| 8985 | } else if (++tries == 5) { |
Oscar Salvador | c8e28b4 | 2021-05-04 18:35:14 -0700 | [diff] [blame] | 8986 | ret = -EBUSY; |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 8987 | break; |
| 8988 | } |
| 8989 | |
Minchan Kim | beb51ea | 2012-10-08 16:33:51 -0700 | [diff] [blame] | 8990 | nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, |
| 8991 | &cc->migratepages); |
| 8992 | cc->nr_migratepages -= nr_reclaimed; |
Minchan Kim | 02c6de8 | 2012-10-08 16:31:55 -0700 | [diff] [blame] | 8993 | |
Joonsoo Kim | 8b94e0b | 2020-08-11 18:37:31 -0700 | [diff] [blame] | 8994 | ret = migrate_pages(&cc->migratepages, alloc_migration_target, |
Yang Shi | 5ac9588 | 2021-09-02 14:59:13 -0700 | [diff] [blame] | 8995 | NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); |
Oscar Salvador | c8e28b4 | 2021-05-04 18:35:14 -0700 | [diff] [blame] | 8996 | |
| 8997 | /* |
| 8998 | * On -ENOMEM, migrate_pages() bails out right away. It is pointless |
| 8999 | * to retry again over this error, so do the same here. |
| 9000 | */ |
| 9001 | if (ret == -ENOMEM) |
| 9002 | break; |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9003 | } |
Minchan Kim | d479960e | 2021-05-04 18:36:54 -0700 | [diff] [blame] | 9004 | |
Minchan Kim | 361a2a2 | 2021-05-04 18:36:57 -0700 | [diff] [blame] | 9005 | lru_cache_enable(); |
Srinivas Pandruvada | 2a6f512 | 2013-02-22 16:32:09 -0800 | [diff] [blame] | 9006 | if (ret < 0) { |
Minchan Kim | 151e084a | 2021-06-28 19:42:06 -0700 | [diff] [blame] | 9007 | if (ret == -EBUSY) |
| 9008 | alloc_contig_dump_pages(&cc->migratepages); |
Srinivas Pandruvada | 2a6f512 | 2013-02-22 16:32:09 -0800 | [diff] [blame] | 9009 | putback_movable_pages(&cc->migratepages); |
| 9010 | return ret; |
| 9011 | } |
| 9012 | return 0; |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9013 | } |
| 9014 | |
| 9015 | /** |
| 9016 | * alloc_contig_range() -- tries to allocate given range of pages |
| 9017 | * @start: start PFN to allocate |
| 9018 | * @end: one-past-the-last PFN to allocate |
Ingo Molnar | f0953a1 | 2021-05-06 18:06:47 -0700 | [diff] [blame] | 9019 | * @migratetype: migratetype of the underlying pageblocks (either |
Michal Nazarewicz | 0815f3d | 2012-04-03 15:06:15 +0200 | [diff] [blame] | 9020 | * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks |
| 9021 | * in range must have the same migratetype and it must |
| 9022 | * be either of the two. |
Lucas Stach | ca96b62 | 2017-02-24 14:58:37 -0800 | [diff] [blame] | 9023 | * @gfp_mask: GFP mask to use during compaction |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9024 | * |
| 9025 | * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES |
Mike Kravetz | 2c7452a | 2018-04-05 16:25:26 -0700 | [diff] [blame] | 9026 | * aligned. The PFN range must belong to a single zone. |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9027 | * |
Mike Kravetz | 2c7452a | 2018-04-05 16:25:26 -0700 | [diff] [blame] | 9028 | * The first thing this routine does is attempt to MIGRATE_ISOLATE all |
| 9029 | * pageblocks in the range. Once isolated, the pageblocks should not |
| 9030 | * be modified by others. |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9031 | * |
Mike Rapoport | a862f68 | 2019-03-05 15:48:42 -0800 | [diff] [blame] | 9032 | * Return: zero on success or negative error code. On success all |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9033 | * pages which PFN is in [start, end) are allocated for the caller and |
| 9034 | * need to be freed with free_contig_range(). |
| 9035 | */ |
Michal Nazarewicz | 0815f3d | 2012-04-03 15:06:15 +0200 | [diff] [blame] | 9036 | int alloc_contig_range(unsigned long start, unsigned long end, |
Lucas Stach | ca96b62 | 2017-02-24 14:58:37 -0800 | [diff] [blame] | 9037 | unsigned migratetype, gfp_t gfp_mask) |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9038 | { |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9039 | unsigned long outer_start, outer_end; |
Kirill A. Shutemov | d00181b | 2015-11-06 16:29:57 -0800 | [diff] [blame] | 9040 | unsigned int order; |
| 9041 | int ret = 0; |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9042 | |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 9043 | struct compact_control cc = { |
| 9044 | .nr_migratepages = 0, |
| 9045 | .order = -1, |
| 9046 | .zone = page_zone(pfn_to_page(start)), |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 9047 | .mode = MIGRATE_SYNC, |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 9048 | .ignore_skip_hint = true, |
Vlastimil Babka | 2583d67 | 2017-11-17 15:26:38 -0800 | [diff] [blame] | 9049 | .no_set_skip_hint = true, |
Michal Hocko | 7dea19f | 2017-05-03 14:53:15 -0700 | [diff] [blame] | 9050 | .gfp_mask = current_gfp_context(gfp_mask), |
Rik van Riel | b06eda0 | 2020-04-01 21:10:28 -0700 | [diff] [blame] | 9051 | .alloc_contig = true, |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 9052 | }; |
| 9053 | INIT_LIST_HEAD(&cc.migratepages); |
| 9054 | |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9055 | /* |
| 9056 | * What we do here is we mark all pageblocks in range as |
| 9057 | * MIGRATE_ISOLATE. Because pageblock and max order pages may |
| 9058 | * have different sizes, and due to the way page allocator |
| 9059 | * work, we align the range to biggest of the two pages so |
| 9060 | * that page allocator won't try to merge buddies from |
| 9061 | * different pageblocks and change MIGRATE_ISOLATE to some |
| 9062 | * other migration type. |
| 9063 | * |
| 9064 | * Once the pageblocks are marked as MIGRATE_ISOLATE, we |
| 9065 | * migrate the pages from an unaligned range (ie. pages that |
| 9066 | * we are interested in). This will put all the pages in |
| 9067 | * range back to page allocator as MIGRATE_ISOLATE. |
| 9068 | * |
| 9069 | * When this is done, we take the pages in range from page |
| 9070 | * allocator removing them from the buddy system. This way |
| 9071 | * page allocator will never consider using them. |
| 9072 | * |
| 9073 | * This lets us mark the pageblocks back as |
| 9074 | * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the |
| 9075 | * aligned range but not in the unaligned, original range are |
| 9076 | * put back to page allocator so that buddy can use them. |
| 9077 | */ |
| 9078 | |
| 9079 | ret = start_isolate_page_range(pfn_max_align_down(start), |
Michal Hocko | d381c54 | 2018-12-28 00:33:56 -0800 | [diff] [blame] | 9080 | pfn_max_align_up(end), migratetype, 0); |
David Hildenbrand | 3fa0c7c | 2020-10-15 20:08:07 -0700 | [diff] [blame] | 9081 | if (ret) |
Bob Liu | 86a595f | 2012-10-25 13:37:56 -0700 | [diff] [blame] | 9082 | return ret; |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9083 | |
Vlastimil Babka | 7612921 | 2020-12-14 19:10:56 -0800 | [diff] [blame] | 9084 | drain_all_pages(cc.zone); |
| 9085 | |
Joonsoo Kim | 8ef5849 | 2016-01-14 15:18:45 -0800 | [diff] [blame] | 9086 | /* |
| 9087 | * In case of -EBUSY, we'd like to know which page causes problem. |
Mike Kravetz | 63cd448 | 2017-11-29 16:10:01 -0800 | [diff] [blame] | 9088 | * So, just fall through. test_pages_isolated() has a tracepoint |
| 9089 | * which will report the busy page. |
| 9090 | * |
| 9091 | * It is possible that busy pages could become available before |
| 9092 | * the call to test_pages_isolated, and the range will actually be |
| 9093 | * allocated. So, if we fall through be sure to clear ret so that |
| 9094 | * -EBUSY is not accidentally used or returned to caller. |
Joonsoo Kim | 8ef5849 | 2016-01-14 15:18:45 -0800 | [diff] [blame] | 9095 | */ |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 9096 | ret = __alloc_contig_migrate_range(&cc, start, end); |
Joonsoo Kim | 8ef5849 | 2016-01-14 15:18:45 -0800 | [diff] [blame] | 9097 | if (ret && ret != -EBUSY) |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9098 | goto done; |
Zhiyuan Dai | 68d68ff | 2021-05-04 18:40:12 -0700 | [diff] [blame] | 9099 | ret = 0; |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9100 | |
| 9101 | /* |
| 9102 | * Pages from [start, end) are within a MAX_ORDER_NR_PAGES |
| 9103 | * aligned blocks that are marked as MIGRATE_ISOLATE. What's |
| 9104 | * more, all pages in [start, end) are free in page allocator. |
| 9105 | * What we are going to do is to allocate all pages from |
| 9106 | * [start, end) (that is remove them from page allocator). |
| 9107 | * |
| 9108 | * The only problem is that pages at the beginning and at the |
| 9109 | * end of interesting range may be not aligned with pages that |
| 9110 | * page allocator holds, ie. they can be part of higher order |
| 9111 | * pages. Because of this, we reserve the bigger range and |
| 9112 | * once this is done free the pages we are not interested in. |
| 9113 | * |
| 9114 | * We don't have to hold zone->lock here because the pages are |
| 9115 | * isolated thus they won't get removed from buddy. |
| 9116 | */ |
| 9117 | |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9118 | order = 0; |
| 9119 | outer_start = start; |
| 9120 | while (!PageBuddy(pfn_to_page(outer_start))) { |
| 9121 | if (++order >= MAX_ORDER) { |
Joonsoo Kim | 8ef5849 | 2016-01-14 15:18:45 -0800 | [diff] [blame] | 9122 | outer_start = start; |
| 9123 | break; |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9124 | } |
| 9125 | outer_start &= ~0UL << order; |
| 9126 | } |
| 9127 | |
Joonsoo Kim | 8ef5849 | 2016-01-14 15:18:45 -0800 | [diff] [blame] | 9128 | if (outer_start != start) { |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 9129 | order = buddy_order(pfn_to_page(outer_start)); |
Joonsoo Kim | 8ef5849 | 2016-01-14 15:18:45 -0800 | [diff] [blame] | 9130 | |
| 9131 | /* |
| 9132 | * outer_start page could be small order buddy page and |
| 9133 | * it doesn't include start page. Adjust outer_start |
| 9134 | * in this case to report failed page properly |
| 9135 | * on tracepoint in test_pages_isolated() |
| 9136 | */ |
| 9137 | if (outer_start + (1UL << order) <= start) |
| 9138 | outer_start = start; |
| 9139 | } |
| 9140 | |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9141 | /* Make sure the range is really isolated. */ |
David Hildenbrand | 756d25b | 2019-11-30 17:54:07 -0800 | [diff] [blame] | 9142 | if (test_pages_isolated(outer_start, end, 0)) { |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9143 | ret = -EBUSY; |
| 9144 | goto done; |
| 9145 | } |
| 9146 | |
Marek Szyprowski | 49f223a | 2012-01-25 12:49:24 +0100 | [diff] [blame] | 9147 | /* Grab isolated pages from freelists. */ |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 9148 | outer_end = isolate_freepages_range(&cc, outer_start, end); |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9149 | if (!outer_end) { |
| 9150 | ret = -EBUSY; |
| 9151 | goto done; |
| 9152 | } |
| 9153 | |
| 9154 | /* Free head and tail (if any) */ |
| 9155 | if (start != outer_start) |
| 9156 | free_contig_range(outer_start, start - outer_start); |
| 9157 | if (end != outer_end) |
| 9158 | free_contig_range(end, outer_end - end); |
| 9159 | |
| 9160 | done: |
| 9161 | undo_isolate_page_range(pfn_max_align_down(start), |
Michal Nazarewicz | 0815f3d | 2012-04-03 15:06:15 +0200 | [diff] [blame] | 9162 | pfn_max_align_up(end), migratetype); |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9163 | return ret; |
| 9164 | } |
David Hildenbrand | 255f598 | 2020-05-07 16:01:29 +0200 | [diff] [blame] | 9165 | EXPORT_SYMBOL(alloc_contig_range); |
Anshuman Khandual | 5e27a2d | 2019-11-30 17:55:06 -0800 | [diff] [blame] | 9166 | |
| 9167 | static int __alloc_contig_pages(unsigned long start_pfn, |
| 9168 | unsigned long nr_pages, gfp_t gfp_mask) |
| 9169 | { |
| 9170 | unsigned long end_pfn = start_pfn + nr_pages; |
| 9171 | |
| 9172 | return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE, |
| 9173 | gfp_mask); |
| 9174 | } |
| 9175 | |
| 9176 | static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, |
| 9177 | unsigned long nr_pages) |
| 9178 | { |
| 9179 | unsigned long i, end_pfn = start_pfn + nr_pages; |
| 9180 | struct page *page; |
| 9181 | |
| 9182 | for (i = start_pfn; i < end_pfn; i++) { |
| 9183 | page = pfn_to_online_page(i); |
| 9184 | if (!page) |
| 9185 | return false; |
| 9186 | |
| 9187 | if (page_zone(page) != z) |
| 9188 | return false; |
| 9189 | |
| 9190 | if (PageReserved(page)) |
| 9191 | return false; |
Anshuman Khandual | 5e27a2d | 2019-11-30 17:55:06 -0800 | [diff] [blame] | 9192 | } |
| 9193 | return true; |
| 9194 | } |
| 9195 | |
| 9196 | static bool zone_spans_last_pfn(const struct zone *zone, |
| 9197 | unsigned long start_pfn, unsigned long nr_pages) |
| 9198 | { |
| 9199 | unsigned long last_pfn = start_pfn + nr_pages - 1; |
| 9200 | |
| 9201 | return zone_spans_pfn(zone, last_pfn); |
| 9202 | } |
| 9203 | |
| 9204 | /** |
| 9205 | * alloc_contig_pages() -- tries to find and allocate contiguous range of pages |
| 9206 | * @nr_pages: Number of contiguous pages to allocate |
| 9207 | * @gfp_mask: GFP mask to limit search and used during compaction |
| 9208 | * @nid: Target node |
| 9209 | * @nodemask: Mask for other possible nodes |
| 9210 | * |
| 9211 | * This routine is a wrapper around alloc_contig_range(). It scans over zones |
| 9212 | * on an applicable zonelist to find a contiguous pfn range which can then be |
| 9213 | * tried for allocation with alloc_contig_range(). This routine is intended |
| 9214 | * for allocation requests which can not be fulfilled with the buddy allocator. |
| 9215 | * |
| 9216 | * The allocated memory is always aligned to a page boundary. If nr_pages is a |
| 9217 | * power of two then the alignment is guaranteed to be to the given nr_pages |
| 9218 | * (e.g. 1GB request would be aligned to 1GB). |
| 9219 | * |
| 9220 | * Allocated pages can be freed with free_contig_range() or by manually calling |
| 9221 | * __free_page() on each allocated page. |
| 9222 | * |
| 9223 | * Return: pointer to contiguous pages on success, or NULL if not successful. |
| 9224 | */ |
| 9225 | struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, |
| 9226 | int nid, nodemask_t *nodemask) |
| 9227 | { |
| 9228 | unsigned long ret, pfn, flags; |
| 9229 | struct zonelist *zonelist; |
| 9230 | struct zone *zone; |
| 9231 | struct zoneref *z; |
| 9232 | |
| 9233 | zonelist = node_zonelist(nid, gfp_mask); |
| 9234 | for_each_zone_zonelist_nodemask(zone, z, zonelist, |
| 9235 | gfp_zone(gfp_mask), nodemask) { |
| 9236 | spin_lock_irqsave(&zone->lock, flags); |
| 9237 | |
| 9238 | pfn = ALIGN(zone->zone_start_pfn, nr_pages); |
| 9239 | while (zone_spans_last_pfn(zone, pfn, nr_pages)) { |
| 9240 | if (pfn_range_valid_contig(zone, pfn, nr_pages)) { |
| 9241 | /* |
| 9242 | * We release the zone lock here because |
| 9243 | * alloc_contig_range() will also lock the zone |
| 9244 | * at some point. If there's an allocation |
| 9245 | * spinning on this lock, it may win the race |
| 9246 | * and cause alloc_contig_range() to fail... |
| 9247 | */ |
| 9248 | spin_unlock_irqrestore(&zone->lock, flags); |
| 9249 | ret = __alloc_contig_pages(pfn, nr_pages, |
| 9250 | gfp_mask); |
| 9251 | if (!ret) |
| 9252 | return pfn_to_page(pfn); |
| 9253 | spin_lock_irqsave(&zone->lock, flags); |
| 9254 | } |
| 9255 | pfn += nr_pages; |
| 9256 | } |
| 9257 | spin_unlock_irqrestore(&zone->lock, flags); |
| 9258 | } |
| 9259 | return NULL; |
| 9260 | } |
Alexandre Ghiti | 4eb0716 | 2019-05-13 17:19:04 -0700 | [diff] [blame] | 9261 | #endif /* CONFIG_CONTIG_ALLOC */ |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9262 | |
Minchan Kim | 78fa515 | 2021-05-04 18:37:34 -0700 | [diff] [blame] | 9263 | void free_contig_range(unsigned long pfn, unsigned long nr_pages) |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9264 | { |
Minchan Kim | 78fa515 | 2021-05-04 18:37:34 -0700 | [diff] [blame] | 9265 | unsigned long count = 0; |
Marek Szyprowski | bcc2b02 | 2012-12-20 15:05:18 -0800 | [diff] [blame] | 9266 | |
| 9267 | for (; nr_pages--; pfn++) { |
| 9268 | struct page *page = pfn_to_page(pfn); |
| 9269 | |
| 9270 | count += page_count(page) != 1; |
| 9271 | __free_page(page); |
| 9272 | } |
Minchan Kim | 78fa515 | 2021-05-04 18:37:34 -0700 | [diff] [blame] | 9273 | WARN(count != 0, "%lu pages are still in use!\n", count); |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9274 | } |
David Hildenbrand | 255f598 | 2020-05-07 16:01:29 +0200 | [diff] [blame] | 9275 | EXPORT_SYMBOL(free_contig_range); |
Michal Nazarewicz | 041d3a8 | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 9276 | |
Cody P Schafer | 0a647f3 | 2013-07-03 15:01:33 -0700 | [diff] [blame] | 9277 | /* |
| 9278 | * The zone indicated has a new number of managed_pages; batch sizes and percpu |
Ingo Molnar | f0953a1 | 2021-05-06 18:06:47 -0700 | [diff] [blame] | 9279 | * page high values need to be recalculated. |
Cody P Schafer | 0a647f3 | 2013-07-03 15:01:33 -0700 | [diff] [blame] | 9280 | */ |
Mel Gorman | 04f8cfe | 2021-06-28 19:42:15 -0700 | [diff] [blame] | 9281 | void zone_pcp_update(struct zone *zone, int cpu_online) |
Jiang Liu | 4ed7e02 | 2012-07-31 16:43:35 -0700 | [diff] [blame] | 9282 | { |
Cody P Schafer | c8e251f | 2013-07-03 15:01:29 -0700 | [diff] [blame] | 9283 | mutex_lock(&pcp_batch_high_lock); |
Mel Gorman | 04f8cfe | 2021-06-28 19:42:15 -0700 | [diff] [blame] | 9284 | zone_set_pageset_high_and_batch(zone, cpu_online); |
Cody P Schafer | c8e251f | 2013-07-03 15:01:29 -0700 | [diff] [blame] | 9285 | mutex_unlock(&pcp_batch_high_lock); |
Jiang Liu | 4ed7e02 | 2012-07-31 16:43:35 -0700 | [diff] [blame] | 9286 | } |
Jiang Liu | 4ed7e02 | 2012-07-31 16:43:35 -0700 | [diff] [blame] | 9287 | |
Vlastimil Babka | ec6e8c7e | 2020-12-14 19:10:59 -0800 | [diff] [blame] | 9288 | /* |
| 9289 | * Effectively disable pcplists for the zone by setting the high limit to 0 |
| 9290 | * and draining all cpus. A concurrent page freeing on another CPU that's about |
| 9291 | * to put the page on pcplist will either finish before the drain and the page |
| 9292 | * will be drained, or observe the new high limit and skip the pcplist. |
| 9293 | * |
| 9294 | * Must be paired with a call to zone_pcp_enable(). |
| 9295 | */ |
| 9296 | void zone_pcp_disable(struct zone *zone) |
| 9297 | { |
| 9298 | mutex_lock(&pcp_batch_high_lock); |
| 9299 | __zone_set_pageset_high_and_batch(zone, 0, 1); |
| 9300 | __drain_all_pages(zone, true); |
| 9301 | } |
| 9302 | |
| 9303 | void zone_pcp_enable(struct zone *zone) |
| 9304 | { |
| 9305 | __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch); |
| 9306 | mutex_unlock(&pcp_batch_high_lock); |
| 9307 | } |
| 9308 | |
Jiang Liu | 340175b | 2012-07-31 16:43:32 -0700 | [diff] [blame] | 9309 | void zone_pcp_reset(struct zone *zone) |
| 9310 | { |
Minchan Kim | 5a88381 | 2012-10-08 16:33:39 -0700 | [diff] [blame] | 9311 | int cpu; |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 9312 | struct per_cpu_zonestat *pzstats; |
Jiang Liu | 340175b | 2012-07-31 16:43:32 -0700 | [diff] [blame] | 9313 | |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 9314 | if (zone->per_cpu_pageset != &boot_pageset) { |
Minchan Kim | 5a88381 | 2012-10-08 16:33:39 -0700 | [diff] [blame] | 9315 | for_each_online_cpu(cpu) { |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 9316 | pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); |
| 9317 | drain_zonestat(zone, pzstats); |
Minchan Kim | 5a88381 | 2012-10-08 16:33:39 -0700 | [diff] [blame] | 9318 | } |
Mel Gorman | 28f836b | 2021-06-28 19:41:38 -0700 | [diff] [blame] | 9319 | free_percpu(zone->per_cpu_pageset); |
| 9320 | free_percpu(zone->per_cpu_zonestats); |
| 9321 | zone->per_cpu_pageset = &boot_pageset; |
| 9322 | zone->per_cpu_zonestats = &boot_zonestats; |
Jiang Liu | 340175b | 2012-07-31 16:43:32 -0700 | [diff] [blame] | 9323 | } |
Jiang Liu | 340175b | 2012-07-31 16:43:32 -0700 | [diff] [blame] | 9324 | } |
| 9325 | |
Wen Congyang | 6dcd73d | 2012-12-11 16:01:01 -0800 | [diff] [blame] | 9326 | #ifdef CONFIG_MEMORY_HOTREMOVE |
KAMEZAWA Hiroyuki | 0c0e619 | 2007-10-16 01:26:12 -0700 | [diff] [blame] | 9327 | /* |
David Hildenbrand | 257bea7 | 2020-10-15 20:07:59 -0700 | [diff] [blame] | 9328 | * All pages in the range must be in a single zone, must not contain holes, |
| 9329 | * must span full sections, and must be isolated before calling this function. |
KAMEZAWA Hiroyuki | 0c0e619 | 2007-10-16 01:26:12 -0700 | [diff] [blame] | 9330 | */ |
David Hildenbrand | 257bea7 | 2020-10-15 20:07:59 -0700 | [diff] [blame] | 9331 | void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) |
KAMEZAWA Hiroyuki | 0c0e619 | 2007-10-16 01:26:12 -0700 | [diff] [blame] | 9332 | { |
David Hildenbrand | 257bea7 | 2020-10-15 20:07:59 -0700 | [diff] [blame] | 9333 | unsigned long pfn = start_pfn; |
KAMEZAWA Hiroyuki | 0c0e619 | 2007-10-16 01:26:12 -0700 | [diff] [blame] | 9334 | struct page *page; |
| 9335 | struct zone *zone; |
David Hildenbrand | 0ee5f4f | 2019-11-30 17:54:03 -0800 | [diff] [blame] | 9336 | unsigned int order; |
KAMEZAWA Hiroyuki | 0c0e619 | 2007-10-16 01:26:12 -0700 | [diff] [blame] | 9337 | unsigned long flags; |
Michal Hocko | 5557c76 | 2019-05-13 17:21:24 -0700 | [diff] [blame] | 9338 | |
Michal Hocko | 2d070ea | 2017-07-06 15:37:56 -0700 | [diff] [blame] | 9339 | offline_mem_sections(pfn, end_pfn); |
KAMEZAWA Hiroyuki | 0c0e619 | 2007-10-16 01:26:12 -0700 | [diff] [blame] | 9340 | zone = page_zone(pfn_to_page(pfn)); |
| 9341 | spin_lock_irqsave(&zone->lock, flags); |
KAMEZAWA Hiroyuki | 0c0e619 | 2007-10-16 01:26:12 -0700 | [diff] [blame] | 9342 | while (pfn < end_pfn) { |
KAMEZAWA Hiroyuki | 0c0e619 | 2007-10-16 01:26:12 -0700 | [diff] [blame] | 9343 | page = pfn_to_page(pfn); |
Wen Congyang | b023f46 | 2012-12-11 16:00:45 -0800 | [diff] [blame] | 9344 | /* |
| 9345 | * The HWPoisoned page may be not in buddy system, and |
| 9346 | * page_count() is not 0. |
| 9347 | */ |
| 9348 | if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { |
| 9349 | pfn++; |
Wen Congyang | b023f46 | 2012-12-11 16:00:45 -0800 | [diff] [blame] | 9350 | continue; |
| 9351 | } |
David Hildenbrand | aa21879 | 2020-05-07 16:01:30 +0200 | [diff] [blame] | 9352 | /* |
| 9353 | * At this point all remaining PageOffline() pages have a |
| 9354 | * reference count of 0 and can simply be skipped. |
| 9355 | */ |
| 9356 | if (PageOffline(page)) { |
| 9357 | BUG_ON(page_count(page)); |
| 9358 | BUG_ON(PageBuddy(page)); |
| 9359 | pfn++; |
David Hildenbrand | aa21879 | 2020-05-07 16:01:30 +0200 | [diff] [blame] | 9360 | continue; |
| 9361 | } |
Wen Congyang | b023f46 | 2012-12-11 16:00:45 -0800 | [diff] [blame] | 9362 | |
KAMEZAWA Hiroyuki | 0c0e619 | 2007-10-16 01:26:12 -0700 | [diff] [blame] | 9363 | BUG_ON(page_count(page)); |
| 9364 | BUG_ON(!PageBuddy(page)); |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 9365 | order = buddy_order(page); |
Alexander Duyck | 6ab0136 | 2020-04-06 20:04:49 -0700 | [diff] [blame] | 9366 | del_page_from_free_list(page, zone, order); |
KAMEZAWA Hiroyuki | 0c0e619 | 2007-10-16 01:26:12 -0700 | [diff] [blame] | 9367 | pfn += (1 << order); |
| 9368 | } |
| 9369 | spin_unlock_irqrestore(&zone->lock, flags); |
| 9370 | } |
| 9371 | #endif |
Wu Fengguang | 8d22ba1 | 2009-12-16 12:19:58 +0100 | [diff] [blame] | 9372 | |
Eric Dumazet | 8446b59 | 2021-11-05 13:40:31 -0700 | [diff] [blame] | 9373 | /* |
| 9374 | * This function returns a stable result only if called under zone lock. |
| 9375 | */ |
Wu Fengguang | 8d22ba1 | 2009-12-16 12:19:58 +0100 | [diff] [blame] | 9376 | bool is_free_buddy_page(struct page *page) |
| 9377 | { |
Wu Fengguang | 8d22ba1 | 2009-12-16 12:19:58 +0100 | [diff] [blame] | 9378 | unsigned long pfn = page_to_pfn(page); |
Mel Gorman | 7aeb09f | 2014-06-04 16:10:21 -0700 | [diff] [blame] | 9379 | unsigned int order; |
Wu Fengguang | 8d22ba1 | 2009-12-16 12:19:58 +0100 | [diff] [blame] | 9380 | |
Wu Fengguang | 8d22ba1 | 2009-12-16 12:19:58 +0100 | [diff] [blame] | 9381 | for (order = 0; order < MAX_ORDER; order++) { |
| 9382 | struct page *page_head = page - (pfn & ((1 << order) - 1)); |
| 9383 | |
Eric Dumazet | 8446b59 | 2021-11-05 13:40:31 -0700 | [diff] [blame] | 9384 | if (PageBuddy(page_head) && |
| 9385 | buddy_order_unsafe(page_head) >= order) |
Wu Fengguang | 8d22ba1 | 2009-12-16 12:19:58 +0100 | [diff] [blame] | 9386 | break; |
| 9387 | } |
Wu Fengguang | 8d22ba1 | 2009-12-16 12:19:58 +0100 | [diff] [blame] | 9388 | |
| 9389 | return order < MAX_ORDER; |
| 9390 | } |
Naoya Horiguchi | d4ae991 | 2018-08-23 17:00:42 -0700 | [diff] [blame] | 9391 | |
| 9392 | #ifdef CONFIG_MEMORY_FAILURE |
| 9393 | /* |
Oscar Salvador | 06be6ff | 2020-10-15 20:07:05 -0700 | [diff] [blame] | 9394 | * Break down a higher-order page in sub-pages, and keep our target out of |
| 9395 | * buddy allocator. |
Naoya Horiguchi | d4ae991 | 2018-08-23 17:00:42 -0700 | [diff] [blame] | 9396 | */ |
Oscar Salvador | 06be6ff | 2020-10-15 20:07:05 -0700 | [diff] [blame] | 9397 | static void break_down_buddy_pages(struct zone *zone, struct page *page, |
| 9398 | struct page *target, int low, int high, |
| 9399 | int migratetype) |
| 9400 | { |
| 9401 | unsigned long size = 1 << high; |
| 9402 | struct page *current_buddy, *next_page; |
| 9403 | |
| 9404 | while (high > low) { |
| 9405 | high--; |
| 9406 | size >>= 1; |
| 9407 | |
| 9408 | if (target >= &page[size]) { |
| 9409 | next_page = page + size; |
| 9410 | current_buddy = page; |
| 9411 | } else { |
| 9412 | next_page = page; |
| 9413 | current_buddy = page + size; |
| 9414 | } |
| 9415 | |
| 9416 | if (set_page_guard(zone, current_buddy, high, migratetype)) |
| 9417 | continue; |
| 9418 | |
| 9419 | if (current_buddy != target) { |
| 9420 | add_to_free_list(current_buddy, zone, high, migratetype); |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 9421 | set_buddy_order(current_buddy, high); |
Oscar Salvador | 06be6ff | 2020-10-15 20:07:05 -0700 | [diff] [blame] | 9422 | page = next_page; |
| 9423 | } |
| 9424 | } |
| 9425 | } |
| 9426 | |
| 9427 | /* |
| 9428 | * Take a page that will be marked as poisoned off the buddy allocator. |
| 9429 | */ |
| 9430 | bool take_page_off_buddy(struct page *page) |
Naoya Horiguchi | d4ae991 | 2018-08-23 17:00:42 -0700 | [diff] [blame] | 9431 | { |
| 9432 | struct zone *zone = page_zone(page); |
| 9433 | unsigned long pfn = page_to_pfn(page); |
| 9434 | unsigned long flags; |
| 9435 | unsigned int order; |
Oscar Salvador | 06be6ff | 2020-10-15 20:07:05 -0700 | [diff] [blame] | 9436 | bool ret = false; |
Naoya Horiguchi | d4ae991 | 2018-08-23 17:00:42 -0700 | [diff] [blame] | 9437 | |
| 9438 | spin_lock_irqsave(&zone->lock, flags); |
| 9439 | for (order = 0; order < MAX_ORDER; order++) { |
| 9440 | struct page *page_head = page - (pfn & ((1 << order) - 1)); |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 9441 | int page_order = buddy_order(page_head); |
Naoya Horiguchi | d4ae991 | 2018-08-23 17:00:42 -0700 | [diff] [blame] | 9442 | |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 9443 | if (PageBuddy(page_head) && page_order >= order) { |
Oscar Salvador | 06be6ff | 2020-10-15 20:07:05 -0700 | [diff] [blame] | 9444 | unsigned long pfn_head = page_to_pfn(page_head); |
| 9445 | int migratetype = get_pfnblock_migratetype(page_head, |
| 9446 | pfn_head); |
| 9447 | |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 9448 | del_page_from_free_list(page_head, zone, page_order); |
Oscar Salvador | 06be6ff | 2020-10-15 20:07:05 -0700 | [diff] [blame] | 9449 | break_down_buddy_pages(zone, page_head, page, 0, |
Matthew Wilcox (Oracle) | ab130f91 | 2020-10-15 20:10:15 -0700 | [diff] [blame] | 9450 | page_order, migratetype); |
Ding Hui | bac9c6f | 2021-06-04 20:01:21 -0700 | [diff] [blame] | 9451 | if (!is_migrate_isolate(migratetype)) |
| 9452 | __mod_zone_freepage_state(zone, -1, migratetype); |
Oscar Salvador | 06be6ff | 2020-10-15 20:07:05 -0700 | [diff] [blame] | 9453 | ret = true; |
Naoya Horiguchi | d4ae991 | 2018-08-23 17:00:42 -0700 | [diff] [blame] | 9454 | break; |
| 9455 | } |
Oscar Salvador | 06be6ff | 2020-10-15 20:07:05 -0700 | [diff] [blame] | 9456 | if (page_count(page_head) > 0) |
| 9457 | break; |
Naoya Horiguchi | d4ae991 | 2018-08-23 17:00:42 -0700 | [diff] [blame] | 9458 | } |
| 9459 | spin_unlock_irqrestore(&zone->lock, flags); |
Oscar Salvador | 06be6ff | 2020-10-15 20:07:05 -0700 | [diff] [blame] | 9460 | return ret; |
Naoya Horiguchi | d4ae991 | 2018-08-23 17:00:42 -0700 | [diff] [blame] | 9461 | } |
| 9462 | #endif |