/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
#include <linux/psi.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/* work_structs for global per-cpu drains */
struct pcpu_drain {
	struct zone *zone;
	struct work_struct work;
};
DEFINE_MUTEX(pcpu_drain_mutex);
DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

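/*
 * Illustrative note (not part of the original source): as an example of the
 * caching above, a page that belongs to a MIGRATE_CMA pageblock but sits on
 * the MIGRATE_MOVABLE pcplist still carries MIGRATE_CMA in page->index, so
 * when the pcplist is drained the page is returned to the CMA free list
 * rather than the movable one.
 */
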
#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with system_transition_mutex held
 * (gfp_allowed_mask also should only be modified with system_transition_mutex
 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
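/*
 * Illustrative arithmetic (not part of the original source): with the default
 * ratio of 256 for ZONE_DMA, an allocation that could have been satisfied
 * from ZONE_NORMAL may only dip into ZONE_DMA once roughly
 * normal_pages / 256 pages are kept back there; for the 784M example above
 * that is about 784M / 256 ~= 3M of DMA memory that ordinary GFP_KERNEL
 * allocations will not consume.
 */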
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[] = {
	NULL,
	free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_boost_factor __read_mostly = 15000;
int watermark_scale_factor = 10;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static unsigned long dma_reserve __initdata;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
static bool mirrored_kernelcore __meminitdata;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

/*
 * Call kasan_free_pages() only after deferred memory initialization
 * has completed. Poisoning pages during deferred memory init would greatly
 * lengthen the process and cause problems on large memory systems, as the
 * deferred page initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline void kasan_free_nondeferred_pages(struct page *page, int order)
{
	if (!static_branch_unlikely(&deferred_pages))
		kasan_free_pages(page, order);
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	/*
	 * The static prev_end_pfn holds the end of the previous zone. No
	 * locking is needed because this is called very early in boot,
	 * before smp_init().
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	/*
	 * We start with only one section of pages; more pages are added as
	 * needed until the rest of the deferred pages are initialized.
	 */
	nr_initialised++;
	if ((nr_initialised > PAGES_PER_SECTION) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		NODE_DATA(nid)->first_deferred_pfn = pfn;
		return true;
	}
	return false;
}
#else
#define kasan_free_nondeferred_pages(p, o)	kasan_free_pages(p, o)

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	return false;
}
#endif

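/*
 * Illustrative note (not part of the original source): zones that end below
 * pgdat_end_pfn() are always fully initialised here; in the node's last zone,
 * defer_init() initialises roughly one section's worth of pages up front
 * (PAGES_PER_SECTION, e.g. 32768 4KB pages / 128MB on a typical x86-64
 * SPARSEMEM configuration) before recording first_deferred_pfn at the next
 * section boundary and leaving the remainder to page_alloc_init_late().
 */
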
/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	bitidx += end_bitidx;
	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
}

static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
}

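/*
 * Illustrative arithmetic (not part of the original source, assuming a
 * SPARSEMEM configuration with pageblock_order == 9 and 64-bit longs): for
 * pfn 0x2400, pfn_to_bitidx() yields (0x2400 >> 9) * NR_PAGEBLOCK_BITS =
 * 18 * 4 = 72, so the block's flags live in bitmap word 72 / 64 = 1 at bit
 * offset 72 % 64 = 8, counted from the most significant end of the word by
 * the shifts in __get_pfnblock_flags_mask() above.
 */
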
/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	bitidx += end_bitidx;
	mask <<= (BITS_PER_LONG - bitidx - 1);
	flags <<= (BITS_PER_LONG - bitidx - 1);

	word = READ_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	__dump_page(page, reason);
	bad_flags &= page->flags;
	if (bad_flags)
		pr_alert("bad because of flags: %#lx(%pGp)\n",
						bad_flags, &bad_flags);
	dump_page_owner(page);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head; the rest of the bits form a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset into the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

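/*
 * Illustrative example (not part of the original source): an order-2 compound
 * page spans four struct pages. page[0] is the head page with PG_head set;
 * page[1]..page[3] each have compound_head == (unsigned long)&page[0] | 1, so
 * PageTail() is true and compound_head() recovers the head page; page[1] also
 * carries compound_dtor and compound_order == 2.
 */
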
void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->mapping = TAIL_MAPPING;
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled);
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
	if (!buf)
		return -EINVAL;
	return kstrtobool(buf, &_debug_pagealloc_enabled);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
	/* If we don't use debug_pagealloc, we don't need guard page */
	if (!debug_pagealloc_enabled())
		return false;

	if (!debug_guardpage_minorder())
		return false;

	return true;
}

static void init_debug_guardpage(void)
{
	if (!debug_pagealloc_enabled())
		return;

	if (!debug_guardpage_minorder())
		return;

	_debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
	.need = need_debug_guardpage,
	.init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return false;

	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;

	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops;
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy that
 * we can coalesce with. A page and its buddy can be coalesced if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (page_is_guard(buddy) && page_order(buddy) == order) {
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}
	return 0;
}

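/*
 * Illustrative example (not part of the original source): the buddy of a free
 * block is found by flipping the order bit of its pfn, i.e.
 * __find_buddy_pfn(pfn, order) == pfn ^ (1 << order). For an order-2 block at
 * pfn 8 the buddy is pfn 12; if both are free and of order 2 they merge into
 * an order-3 block at the combined pfn 8 (buddy_pfn & pfn).
 */
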
#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return capc &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone &&
		capc->cc->direct_compaction ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa but no more than normal fallback logic which can
	 * have trouble finding a high-order free page.
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
		return false;

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with PageBuddy.
 * A page's order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

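/*
 * Illustrative walk-through (not part of the original source): freeing an
 * order-0 page at pfn 20 whose buddy at pfn 21 is already free merges them
 * into an order-1 block at pfn 20; if the order-1 buddy at pfn 22 is free as
 * well, the result is an order-2 block at pfn 20, and so on until a buddy is
 * busy or max_order is reached, at which point the block is placed on the
 * matching free_area list.
 */
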
static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long combined_pfn;
	unsigned long uninitialized_var(buddy_pfn);
	struct page *buddy;
	unsigned int max_order;
	struct capture_control *capc = task_capc(zone);

	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

continue_merging:
	while (order < max_order - 1) {
		if (compaction_capture(capc, page, order, migratetype)) {
			__mod_zone_freepage_state(zone, -(1 << order),
								migratetype);
			return;
		}
		buddy_pfn = __find_buddy_pfn(pfn, order);
		buddy = page + (buddy_pfn - pfn);

		if (!pfn_valid_within(buddy_pfn))
			goto done_merging;
		if (!page_is_buddy(page, buddy, order))
			goto done_merging;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard(zone, buddy, order, migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}
	if (max_order < MAX_ORDER) {
		/* If we are here, it means order is >= pageblock_order.
		 * We want to prevent merge between freepages on isolate
		 * pageblock and normal pageblock. Without this, pageblock
		 * isolation could cause incorrect freepage or CMA accounting.
		 *
		 * We don't want to hit this code for the more frequent
		 * low-order merging.
		 */
		if (unlikely(has_isolate_pageblock(zone))) {
			int buddy_mt;

			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);
			buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (is_migrate_isolate(migratetype) ||
						is_migrate_isolate(buddy_mt)))
				goto done_merging;
		}
		max_order++;
		goto continue_merging;
	}

done_merging:
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case,
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
		struct page *higher_page, *higher_buddy;
		combined_pfn = buddy_pfn & pfn;
		higher_page = page + (combined_pfn - pfn);
		buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
		higher_buddy = higher_page + (buddy_pfn - combined_pfn);
		if (pfn_valid_within(buddy_pfn) &&
		    page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			(unsigned long)page->mem_cgroup |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static void free_pages_check_bad(struct page *page)
{
	const char *bad_reason;
	unsigned long bad_flags;

	bad_reason = NULL;
	bad_flags = 0;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	bad_page(page, bad_reason, bad_flags);
}

static inline int free_pages_check(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return 0;

	/* Something has gone sideways, find it */
	free_pages_check_bad(page);
	return 1;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely page->lru.next never has bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: ->mapping may be compound_mapcount() */
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount", 0);
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * deferred_list.next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page", 0);
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set", 0);
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent", 0);
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

static __always_inline bool free_pages_prepare(struct page *page,
					unsigned int order, bool check_free)
{
	int bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound)
			ClearPageDoubleMap(page);
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_pages_check(page, page + i);
			if (unlikely(free_pages_check(page + i))) {
				bad++;
				continue;
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_enabled() && PageKmemcg(page))
		__memcg_kmem_uncharge(page, order);
	if (check_free)
		bad += free_pages_check(page);
	if (bad)
		return false;

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_poison_pages(page, 1 << order, 0);
	kernel_map_pages(page, 1 << order, 0);
	kasan_free_nondeferred_pages(page, order);

	return true;
}

#ifdef CONFIG_DEBUG_VM
static inline bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, true);
}

static inline bool bulkfree_pcp_prepare(struct page *page)
{
	return false;
}
#else
static bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, false);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	return free_pages_check(page);
}
#endif /* CONFIG_DEBUG_VM */

static inline void prefetch_buddy(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
	struct page *buddy = page + (buddy_pfn - pfn);

	prefetch(buddy);
}

Linus Torvalds1da177e2005-04-16 15:20:36 -07001171/*
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001172 * Frees a number of pages from the PCP lists
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 * Assumes all pages on list are in same zone, and of same order.
Renaud Lienhart207f36e2005-09-10 00:26:59 -07001174 * count is the number of pages to free.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175 *
1176 * If the zone was previously in an "all pages pinned" state then look to
1177 * see if this freeing clears that state.
1178 *
1179 * And clear the zone's pages_scanned counter, to hold off the "all pages are
1180 * pinned" detection logic.
1181 */
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001182static void free_pcppages_bulk(struct zone *zone, int count,
1183 struct per_cpu_pages *pcp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184{
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001185 int migratetype = 0;
Mel Gormana6f9edd62009-09-21 17:03:20 -07001186 int batch_free = 0;
Aaron Lu97334162018-04-05 16:24:14 -07001187 int prefetch_nr = 0;
Mel Gorman37779992016-05-19 17:13:58 -07001188 bool isolated_pageblocks;
Aaron Lu0a5f4e52018-04-05 16:24:10 -07001189 struct page *page, *tmp;
1190 LIST_HEAD(head);
Mel Gormanf2260e62009-06-16 15:32:13 -07001191
Mel Gormane5b31ac2016-05-19 17:14:24 -07001192 while (count) {
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001193 struct list_head *list;
Nick Piggin48db57f2006-01-08 01:00:42 -08001194
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001195 /*
Mel Gormana6f9edd62009-09-21 17:03:20 -07001196 * Remove pages from lists in a round-robin fashion. A
1197 * batch_free count is maintained that is incremented when an
1198 * empty list is encountered. This is so more pages are freed
1199 * off fuller lists instead of spinning excessively around empty
1200 * lists
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001201 */
1202 do {
Mel Gormana6f9edd62009-09-21 17:03:20 -07001203 batch_free++;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001204 if (++migratetype == MIGRATE_PCPTYPES)
1205 migratetype = 0;
1206 list = &pcp->lists[migratetype];
1207 } while (list_empty(list));
1208
Namhyung Kim1d168712011-03-22 16:32:45 -07001209 /* This is the only non-empty list. Free them all. */
1210 if (batch_free == MIGRATE_PCPTYPES)
Mel Gormane5b31ac2016-05-19 17:14:24 -07001211 batch_free = count;
Namhyung Kim1d168712011-03-22 16:32:45 -07001212
Mel Gormana6f9edd62009-09-21 17:03:20 -07001213 do {
Geliang Tanga16601c2016-01-14 15:20:30 -08001214 page = list_last_entry(list, struct page, lru);
Aaron Lu0a5f4e52018-04-05 16:24:10 -07001215 /* must delete to avoid corrupting pcp list */
Mel Gormana6f9edd62009-09-21 17:03:20 -07001216 list_del(&page->lru);
Aaron Lu77ba9062018-04-05 16:24:06 -07001217 pcp->count--;
Vlastimil Babkaaa016d12015-09-08 15:01:22 -07001218
Mel Gorman4db75482016-05-19 17:14:32 -07001219 if (bulkfree_pcp_prepare(page))
1220 continue;
1221
Aaron Lu0a5f4e52018-04-05 16:24:10 -07001222 list_add_tail(&page->lru, &head);
Aaron Lu97334162018-04-05 16:24:14 -07001223
1224 /*
1225			 * We are going to put the page back into the global
1226			 * pool, so prefetch its buddy to speed up later access
1227			 * under zone->lock. It is believed that the overhead of
1228			 * the additional test and of calculating buddy_pfn here
1229			 * can be offset by the reduced memory latency later. To
1230			 * avoid excessive prefetching due to a large count, only
1231			 * prefetch the buddy for the first pcp->batch pages.
1232 */
1233 if (prefetch_nr++ < pcp->batch)
1234 prefetch_buddy(page);
Mel Gormane5b31ac2016-05-19 17:14:24 -07001235 } while (--count && --batch_free && !list_empty(list));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 }
Aaron Lu0a5f4e52018-04-05 16:24:10 -07001237
1238 spin_lock(&zone->lock);
1239 isolated_pageblocks = has_isolate_pageblock(zone);
1240
1241 /*
1242 * Use safe version since after __free_one_page(),
1243 * page->lru.next will not point to original list.
1244 */
1245 list_for_each_entry_safe(page, tmp, &head, lru) {
1246 int mt = get_pcppage_migratetype(page);
1247 /* MIGRATE_ISOLATE page should not go to pcplists */
1248 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1249 /* Pageblock could have been isolated meanwhile */
1250 if (unlikely(isolated_pageblocks))
1251 mt = get_pageblock_migratetype(page);
1252
1253 __free_one_page(page, page_to_pfn(page), zone, 0, mt);
1254 trace_mm_page_pcpu_drain(page, 0, mt);
1255 }
Mel Gormand34b0732017-04-20 14:37:43 -07001256 spin_unlock(&zone->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257}
1258
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001259static void free_one_page(struct zone *zone,
1260 struct page *page, unsigned long pfn,
Mel Gorman7aeb09f2014-06-04 16:10:21 -07001261 unsigned int order,
Mel Gormaned0ae212009-06-16 15:32:07 -07001262 int migratetype)
Nick Piggin48db57f2006-01-08 01:00:42 -08001263{
Mel Gormand34b0732017-04-20 14:37:43 -07001264 spin_lock(&zone->lock);
Joonsoo Kimad53f922014-11-13 15:19:11 -08001265 if (unlikely(has_isolate_pageblock(zone) ||
1266 is_migrate_isolate(migratetype))) {
1267 migratetype = get_pfnblock_migratetype(page, pfn);
Joonsoo Kimad53f922014-11-13 15:19:11 -08001268 }
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001269 __free_one_page(page, pfn, zone, order, migratetype);
Mel Gormand34b0732017-04-20 14:37:43 -07001270 spin_unlock(&zone->lock);
Nick Piggin48db57f2006-01-08 01:00:42 -08001271}
1272
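/*
 * Fully initialise one struct page: zero the structure, link it to its
 * zone and node, initialise the refcount, and clear the mapcount and
 * last-cpupid fields before the page is handed to the allocator.
 */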
Robin Holt1e8ce832015-06-30 14:56:45 -07001273static void __meminit __init_single_page(struct page *page, unsigned long pfn,
Pavel Tatashind0dc12e2018-04-05 16:23:00 -07001274 unsigned long zone, int nid)
Robin Holt1e8ce832015-06-30 14:56:45 -07001275{
Pavel Tatashind0dc12e2018-04-05 16:23:00 -07001276 mm_zero_struct_page(page);
Robin Holt1e8ce832015-06-30 14:56:45 -07001277 set_page_links(page, zone, nid, pfn);
Robin Holt1e8ce832015-06-30 14:56:45 -07001278 init_page_count(page);
1279 page_mapcount_reset(page);
1280 page_cpupid_reset_last(page);
Andrey Konovalov2813b9c2018-12-28 00:30:57 -08001281 page_kasan_tag_reset(page);
Robin Holt1e8ce832015-06-30 14:56:45 -07001282
Robin Holt1e8ce832015-06-30 14:56:45 -07001283 INIT_LIST_HEAD(&page->lru);
1284#ifdef WANT_PAGE_VIRTUAL
1285 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1286 if (!is_highmem_idx(zone))
1287 set_page_address(page, __va(pfn << PAGE_SHIFT));
1288#endif
1289}
1290
Mel Gorman7e18adb2015-06-30 14:57:05 -07001291#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
Arnd Bergmann57148a62017-10-03 16:15:10 -07001292static void __meminit init_reserved_page(unsigned long pfn)
Mel Gorman7e18adb2015-06-30 14:57:05 -07001293{
1294 pg_data_t *pgdat;
1295 int nid, zid;
1296
1297 if (!early_page_uninitialised(pfn))
1298 return;
1299
1300 nid = early_pfn_to_nid(pfn);
1301 pgdat = NODE_DATA(nid);
1302
1303 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1304 struct zone *zone = &pgdat->node_zones[zid];
1305
1306 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1307 break;
1308 }
Pavel Tatashind0dc12e2018-04-05 16:23:00 -07001309 __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001310}
1311#else
1312static inline void init_reserved_page(unsigned long pfn)
1313{
1314}
1315#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1316
Nathan Zimmer92923ca2015-06-30 14:56:48 -07001317/*
1318 * Initialised pages do not have PageReserved set. This function is
1319 * called for each range allocated by the bootmem allocator and
1320 * marks the pages PageReserved. The remaining valid pages are later
1321 * sent to the buddy page allocator.
1322 */
Stefan Bader4b50bcc2016-05-20 16:58:38 -07001323void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
Nathan Zimmer92923ca2015-06-30 14:56:48 -07001324{
1325 unsigned long start_pfn = PFN_DOWN(start);
1326 unsigned long end_pfn = PFN_UP(end);
1327
Mel Gorman7e18adb2015-06-30 14:57:05 -07001328 for (; start_pfn < end_pfn; start_pfn++) {
1329 if (pfn_valid(start_pfn)) {
1330 struct page *page = pfn_to_page(start_pfn);
1331
1332 init_reserved_page(start_pfn);
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -08001333
1334 /* Avoid false-positive PageTail() */
1335 INIT_LIST_HEAD(&page->lru);
1336
Alexander Duyckd483da52018-10-26 15:07:48 -07001337 /*
1338 * no need for atomic set_bit because the struct
1339 * page is not visible yet so nobody should
1340 * access it yet.
1341 */
1342 __SetPageReserved(page);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001343 }
1344 }
Nathan Zimmer92923ca2015-06-30 14:56:48 -07001345}
1346
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001347static void __free_pages_ok(struct page *page, unsigned int order)
1348{
Mel Gormand34b0732017-04-20 14:37:43 -07001349 unsigned long flags;
Minchan Kim95e34412012-10-08 16:32:11 -07001350 int migratetype;
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001351 unsigned long pfn = page_to_pfn(page);
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001352
Mel Gormane2769db2016-05-19 17:14:38 -07001353 if (!free_pages_prepare(page, order, true))
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001354 return;
1355
Mel Gormancfc47a22014-06-04 16:10:19 -07001356 migratetype = get_pfnblock_migratetype(page, pfn);
Mel Gormand34b0732017-04-20 14:37:43 -07001357 local_irq_save(flags);
1358 __count_vm_events(PGFREE, 1 << order);
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001359 free_one_page(page_zone(page), page, pfn, order, migratetype);
Mel Gormand34b0732017-04-20 14:37:43 -07001360 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361}
1362
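/*
 * Release a chunk of memory from the boot allocator to the buddy allocator:
 * clear PageReserved and zero the refcount on every constituent page, add
 * the pages to the zone's managed count, then free the chunk at @order.
 */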
Arun KSa9cd4102019-03-05 15:42:14 -08001363void __free_pages_core(struct page *page, unsigned int order)
David Howellsa226f6c2006-01-06 00:11:08 -08001364{
Johannes Weinerc3993072012-01-10 15:08:10 -08001365 unsigned int nr_pages = 1 << order;
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001366 struct page *p = page;
Johannes Weinerc3993072012-01-10 15:08:10 -08001367 unsigned int loop;
David Howellsa226f6c2006-01-06 00:11:08 -08001368
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001369 prefetchw(p);
1370 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1371 prefetchw(p + 1);
Johannes Weinerc3993072012-01-10 15:08:10 -08001372 __ClearPageReserved(p);
1373 set_page_count(p, 0);
David Howellsa226f6c2006-01-06 00:11:08 -08001374 }
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001375 __ClearPageReserved(p);
1376 set_page_count(p, 0);
Johannes Weinerc3993072012-01-10 15:08:10 -08001377
Arun KS9705bea2018-12-28 00:34:24 -08001378 atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
Johannes Weinerc3993072012-01-10 15:08:10 -08001379 set_page_refcounted(page);
1380 __free_pages(page, order);
David Howellsa226f6c2006-01-06 00:11:08 -08001381}
1382
Mel Gorman75a592a2015-06-30 14:56:59 -07001383#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1384 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
Mel Gorman7ace9912015-08-06 15:46:13 -07001385
Mel Gorman75a592a2015-06-30 14:56:59 -07001386static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1387
1388int __meminit early_pfn_to_nid(unsigned long pfn)
1389{
Mel Gorman7ace9912015-08-06 15:46:13 -07001390 static DEFINE_SPINLOCK(early_pfn_lock);
Mel Gorman75a592a2015-06-30 14:56:59 -07001391 int nid;
1392
Mel Gorman7ace9912015-08-06 15:46:13 -07001393 spin_lock(&early_pfn_lock);
Mel Gorman75a592a2015-06-30 14:56:59 -07001394 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
Mel Gorman7ace9912015-08-06 15:46:13 -07001395 if (nid < 0)
Mel Gormane4568d32016-07-14 12:07:20 -07001396 nid = first_online_node;
Mel Gorman7ace9912015-08-06 15:46:13 -07001397 spin_unlock(&early_pfn_lock);
1398
1399 return nid;
Mel Gorman75a592a2015-06-30 14:56:59 -07001400}
1401#endif
1402
1403#ifdef CONFIG_NODES_SPAN_OTHER_NODES
Matthias Kaehlcked73d3c9f2017-07-06 15:39:23 -07001404static inline bool __meminit __maybe_unused
1405meminit_pfn_in_nid(unsigned long pfn, int node,
1406 struct mminit_pfnnid_cache *state)
Mel Gorman75a592a2015-06-30 14:56:59 -07001407{
1408 int nid;
1409
1410 nid = __early_pfn_to_nid(pfn, state);
1411 if (nid >= 0 && nid != node)
1412 return false;
1413 return true;
1414}
1415
1416/* Only safe to use early in boot when initialisation is single-threaded */
1417static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1418{
1419 return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1420}
1421
1422#else
1423
1424static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1425{
1426 return true;
1427}
Matthias Kaehlcked73d3c9f2017-07-06 15:39:23 -07001428static inline bool __meminit __maybe_unused
1429meminit_pfn_in_nid(unsigned long pfn, int node,
1430 struct mminit_pfnnid_cache *state)
Mel Gorman75a592a2015-06-30 14:56:59 -07001431{
1432 return true;
1433}
1434#endif
1435
1436
Mike Rapoport7c2ee342018-10-30 15:09:36 -07001437void __init memblock_free_pages(struct page *page, unsigned long pfn,
Mel Gorman3a80a7f2015-06-30 14:57:02 -07001438 unsigned int order)
1439{
1440 if (early_page_uninitialised(pfn))
1441 return;
Arun KSa9cd4102019-03-05 15:42:14 -08001442 __free_pages_core(page, order);
Mel Gorman3a80a7f2015-06-30 14:57:02 -07001443}
1444
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001445/*
1446 * Check that the whole (or subset of) a pageblock given by the interval of
1447 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1448 * with the migration or free compaction scanner. The scanners then need to
1449 * use only pfn_valid_within() check for arches that allow holes within
1450 * pageblocks.
1451 *
1452 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1453 *
1454 * It's possible on some configurations to have a setup like node0 node1 node0
1455 * i.e. it's possible that all pages within a zone's range of pages do not
1456 * belong to a single zone. We assume that a border between node0 and node1
1457 * can occur within a single pageblock, but not a node0 node1 node0
1458 * interleaving within a single pageblock. It is therefore sufficient to check
1459 * the first and last page of a pageblock and avoid checking each individual
1460 * page in a pageblock.
1461 */
1462struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1463 unsigned long end_pfn, struct zone *zone)
1464{
1465 struct page *start_page;
1466 struct page *end_page;
1467
1468 /* end_pfn is one past the range we are checking */
1469 end_pfn--;
1470
1471 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1472 return NULL;
1473
Michal Hocko2d070ea2017-07-06 15:37:56 -07001474 start_page = pfn_to_online_page(start_pfn);
1475 if (!start_page)
1476 return NULL;
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001477
1478 if (page_zone(start_page) != zone)
1479 return NULL;
1480
1481 end_page = pfn_to_page(end_pfn);
1482
1483 /* This gives a shorter code than deriving page_zone(end_page) */
1484 if (page_zone_id(start_page) != page_zone_id(end_page))
1485 return NULL;
1486
1487 return start_page;
1488}
1489
1490void set_zone_contiguous(struct zone *zone)
1491{
1492 unsigned long block_start_pfn = zone->zone_start_pfn;
1493 unsigned long block_end_pfn;
1494
1495 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1496 for (; block_start_pfn < zone_end_pfn(zone);
1497 block_start_pfn = block_end_pfn,
1498 block_end_pfn += pageblock_nr_pages) {
1499
1500 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1501
1502 if (!__pageblock_pfn_to_page(block_start_pfn,
1503 block_end_pfn, zone))
1504 return;
1505 }
1506
1507 /* We confirm that there is no hole */
1508 zone->contiguous = true;
1509}
1510
1511void clear_zone_contiguous(struct zone *zone)
1512{
1513 zone->contiguous = false;
1514}
1515
Mel Gorman7e18adb2015-06-30 14:57:05 -07001516#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001517static void __init deferred_free_range(unsigned long pfn,
1518 unsigned long nr_pages)
Mel Gormana4de83d2015-06-30 14:57:16 -07001519{
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001520 struct page *page;
1521 unsigned long i;
Mel Gormana4de83d2015-06-30 14:57:16 -07001522
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001523 if (!nr_pages)
Mel Gormana4de83d2015-06-30 14:57:16 -07001524 return;
1525
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001526 page = pfn_to_page(pfn);
1527
Mel Gormana4de83d2015-06-30 14:57:16 -07001528 /* Free a large naturally-aligned chunk if possible */
Xishi Qiue7801492016-10-07 16:58:09 -07001529 if (nr_pages == pageblock_nr_pages &&
1530 (pfn & (pageblock_nr_pages - 1)) == 0) {
Mel Gormanac5d2532015-06-30 14:57:20 -07001531 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
Arun KSa9cd4102019-03-05 15:42:14 -08001532 __free_pages_core(page, pageblock_order);
Mel Gormana4de83d2015-06-30 14:57:16 -07001533 return;
1534 }
1535
Xishi Qiue7801492016-10-07 16:58:09 -07001536 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1537 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1538 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
Arun KSa9cd4102019-03-05 15:42:14 -08001539 __free_pages_core(page, 0);
Xishi Qiue7801492016-10-07 16:58:09 -07001540 }
Mel Gormana4de83d2015-06-30 14:57:16 -07001541}
1542
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001543/* Completion tracking for deferred_init_memmap() threads */
1544static atomic_t pgdat_init_n_undone __initdata;
1545static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1546
1547static inline void __init pgdat_init_report_one_done(void)
1548{
1549 if (atomic_dec_and_test(&pgdat_init_n_undone))
1550 complete(&pgdat_init_all_done_comp);
1551}
Mel Gorman0e1cc952015-06-30 14:57:27 -07001552
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001553/*
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001554 * Returns true if page needs to be initialized or freed to buddy allocator.
1555 *
1556 * First we check if pfn is valid on architectures where it is possible to have
1557 * holes within pageblock_nr_pages. On systems where it is not possible, this
1558 * function is optimized out.
1559 *
1560 * Then, we check if a current large page is valid by only checking the validity
1561 * of the head pfn.
1562 *
1563 * Finally, meminit_pfn_in_nid is checked on systems where pfns can interleave
1564 * within a node: a pfn is between start and end of a node, but does not belong
1565 * to this memory node.
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001566 */
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001567static inline bool __init
1568deferred_pfn_valid(int nid, unsigned long pfn,
1569 struct mminit_pfnnid_cache *nid_init_state)
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001570{
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001571 if (!pfn_valid_within(pfn))
1572 return false;
1573 if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
1574 return false;
1575 if (!meminit_pfn_in_nid(pfn, nid, nid_init_state))
1576 return false;
1577 return true;
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001578}
1579
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001580/*
1581 * Free pages to the buddy allocator. Try to free aligned pages in
1582 * pageblock_nr_pages-sized chunks.
1583 */
1584static void __init deferred_free_pages(int nid, int zid, unsigned long pfn,
1585 unsigned long end_pfn)
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001586{
1587 struct mminit_pfnnid_cache nid_init_state = { };
1588 unsigned long nr_pgmask = pageblock_nr_pages - 1;
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001589 unsigned long nr_free = 0;
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001590
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001591 for (; pfn < end_pfn; pfn++) {
1592 if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
1593 deferred_free_range(pfn - nr_free, nr_free);
1594 nr_free = 0;
1595 } else if (!(pfn & nr_pgmask)) {
1596 deferred_free_range(pfn - nr_free, nr_free);
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001597 nr_free = 1;
Pavel Tatashin3a2d7fa2018-04-05 16:22:27 -07001598 touch_nmi_watchdog();
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001599 } else {
1600 nr_free++;
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001601 }
1602 }
1603 /* Free the last block of pages to allocator */
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001604 deferred_free_range(pfn - nr_free, nr_free);
1605}
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001606
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001607/*
1608 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
1609 * by performing them only once every pageblock_nr_pages.
1610 * Return number of pages initialized.
1611 */
1612static unsigned long __init deferred_init_pages(int nid, int zid,
1613 unsigned long pfn,
1614 unsigned long end_pfn)
1615{
1616 struct mminit_pfnnid_cache nid_init_state = { };
1617 unsigned long nr_pgmask = pageblock_nr_pages - 1;
1618 unsigned long nr_pages = 0;
1619 struct page *page = NULL;
1620
1621 for (; pfn < end_pfn; pfn++) {
1622 if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
1623 page = NULL;
1624 continue;
1625 } else if (!page || !(pfn & nr_pgmask)) {
1626 page = pfn_to_page(pfn);
Pavel Tatashin3a2d7fa2018-04-05 16:22:27 -07001627 touch_nmi_watchdog();
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001628 } else {
1629 page++;
1630 }
Pavel Tatashind0dc12e2018-04-05 16:23:00 -07001631 __init_single_page(page, pfn, zid, nid);
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001632 nr_pages++;
1633 }
1634 return (nr_pages);
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001635}
1636
Mel Gorman7e18adb2015-06-30 14:57:05 -07001637/* Initialise remaining memory on a node */
Mel Gorman0e1cc952015-06-30 14:57:27 -07001638static int __init deferred_init_memmap(void *data)
Mel Gorman7e18adb2015-06-30 14:57:05 -07001639{
Mel Gorman0e1cc952015-06-30 14:57:27 -07001640 pg_data_t *pgdat = data;
1641 int nid = pgdat->node_id;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001642 unsigned long start = jiffies;
1643 unsigned long nr_pages = 0;
Pavel Tatashin3a2d7fa2018-04-05 16:22:27 -07001644 unsigned long spfn, epfn, first_init_pfn, flags;
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001645 phys_addr_t spa, epa;
1646 int zid;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001647 struct zone *zone;
Mel Gorman0e1cc952015-06-30 14:57:27 -07001648 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001649 u64 i;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001650
Mel Gorman0e1cc952015-06-30 14:57:27 -07001651 /* Bind memory initialisation thread to a local node if possible */
1652 if (!cpumask_empty(cpumask))
1653 set_cpus_allowed_ptr(current, cpumask);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001654
Pavel Tatashin3a2d7fa2018-04-05 16:22:27 -07001655 pgdat_resize_lock(pgdat, &flags);
1656 first_init_pfn = pgdat->first_deferred_pfn;
1657 if (first_init_pfn == ULONG_MAX) {
1658 pgdat_resize_unlock(pgdat, &flags);
1659 pgdat_init_report_one_done();
1660 return 0;
1661 }
1662
Mel Gorman7e18adb2015-06-30 14:57:05 -07001663 /* Sanity check boundaries */
1664 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1665 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1666 pgdat->first_deferred_pfn = ULONG_MAX;
1667
1668 /* Only the highest zone is deferred so find it */
1669 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1670 zone = pgdat->node_zones + zid;
1671 if (first_init_pfn < zone_end_pfn(zone))
1672 break;
1673 }
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001674 first_init_pfn = max(zone->zone_start_pfn, first_init_pfn);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001675
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001676 /*
1677 * Initialize and free pages. We do it in two loops: first we initialize
1678 * struct page, then free to the buddy allocator, because while we are
1679 * freeing pages we can access pages that are ahead (computing buddy
1680 * page in __free_one_page()).
1681 */
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001682 for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
1683 spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
1684 epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
Pavel Tatashin80b1f412018-01-31 16:16:30 -08001685 nr_pages += deferred_init_pages(nid, zid, spfn, epfn);
1686 }
1687 for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
1688 spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
1689 epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
1690 deferred_free_pages(nid, zid, spfn, epfn);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001691 }
Pavel Tatashin3a2d7fa2018-04-05 16:22:27 -07001692 pgdat_resize_unlock(pgdat, &flags);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001693
1694 /* Sanity check that the next zone really is unpopulated */
1695 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1696
Mel Gorman0e1cc952015-06-30 14:57:27 -07001697 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
Mel Gorman7e18adb2015-06-30 14:57:05 -07001698 jiffies_to_msecs(jiffies - start));
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001699
1700 pgdat_init_report_one_done();
Mel Gorman0e1cc952015-06-30 14:57:27 -07001701 return 0;
1702}
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07001703
1704/*
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07001705 * If this zone has deferred pages, try to grow it by initializing enough
1706 * deferred pages to satisfy the allocation specified by order, rounded up to
1707 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments
1708 * of SECTION_SIZE bytes by initializing struct pages in increments of
1709 * PAGES_PER_SECTION * sizeof(struct page) bytes.
1710 *
1711 * Return true when zone was grown, otherwise return false. We return true even
1712 * when we grow less than requested, to let the caller decide if there are
1713 * enough pages to satisfy the allocation.
1714 *
1715 * Note: We use noinline because this function is needed only during boot, and
1716 * it is called from a __ref function _deferred_grow_zone. This way we are
1717 * making sure that it is not inlined into the permanent text section.
1718 */
1719static noinline bool __init
1720deferred_grow_zone(struct zone *zone, unsigned int order)
1721{
1722 int zid = zone_idx(zone);
1723 int nid = zone_to_nid(zone);
1724 pg_data_t *pgdat = NODE_DATA(nid);
1725 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
1726 unsigned long nr_pages = 0;
1727 unsigned long first_init_pfn, spfn, epfn, t, flags;
1728 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
1729 phys_addr_t spa, epa;
1730 u64 i;
1731
1732 /* Only the last zone may have deferred pages */
1733 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
1734 return false;
1735
1736 pgdat_resize_lock(pgdat, &flags);
1737
1738 /*
1739 * If deferred pages have been initialized while we were waiting for
1740 * the lock, return true, as the zone was grown. The caller will retry
1741 * this zone. We won't return to this function since the caller also
1742 * has this static branch.
1743 */
1744 if (!static_branch_unlikely(&deferred_pages)) {
1745 pgdat_resize_unlock(pgdat, &flags);
1746 return true;
1747 }
1748
1749 /*
1750 * If someone grew this zone while we were waiting for spinlock, return
1751 * true, as there might be enough pages already.
1752 */
1753 if (first_deferred_pfn != pgdat->first_deferred_pfn) {
1754 pgdat_resize_unlock(pgdat, &flags);
1755 return true;
1756 }
1757
1758 first_init_pfn = max(zone->zone_start_pfn, first_deferred_pfn);
1759
1760 if (first_init_pfn >= pgdat_end_pfn(pgdat)) {
1761 pgdat_resize_unlock(pgdat, &flags);
1762 return false;
1763 }
1764
1765 for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
1766 spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
1767 epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
1768
1769 while (spfn < epfn && nr_pages < nr_pages_needed) {
1770 t = ALIGN(spfn + PAGES_PER_SECTION, PAGES_PER_SECTION);
1771 first_deferred_pfn = min(t, epfn);
1772 nr_pages += deferred_init_pages(nid, zid, spfn,
1773 first_deferred_pfn);
1774 spfn = first_deferred_pfn;
1775 }
1776
1777 if (nr_pages >= nr_pages_needed)
1778 break;
1779 }
1780
1781 for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
1782 spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
1783 epfn = min_t(unsigned long, first_deferred_pfn, PFN_DOWN(epa));
1784 deferred_free_pages(nid, zid, spfn, epfn);
1785
1786 if (first_deferred_pfn == epfn)
1787 break;
1788 }
1789 pgdat->first_deferred_pfn = first_deferred_pfn;
1790 pgdat_resize_unlock(pgdat, &flags);
1791
1792 return nr_pages > 0;
1793}
1794
1795/*
1796 * deferred_grow_zone() is __init, but it is called from
1797 * get_page_from_freelist() during early boot until deferred_pages permanently
1798 * disables this call. This is why we have a refdata wrapper to avoid a warning,
1799 * and to ensure that the function body gets unloaded.
1800 */
1801static bool __ref
1802_deferred_grow_zone(struct zone *zone, unsigned int order)
1803{
1804 return deferred_grow_zone(zone, order);
1805}
1806
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001807#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
Mel Gorman0e1cc952015-06-30 14:57:27 -07001808
1809void __init page_alloc_init_late(void)
1810{
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001811 struct zone *zone;
1812
1813#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
Mel Gorman0e1cc952015-06-30 14:57:27 -07001814 int nid;
1815
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001816 /* There will be num_node_state(N_MEMORY) threads */
1817 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
Mel Gorman0e1cc952015-06-30 14:57:27 -07001818 for_each_node_state(nid, N_MEMORY) {
Mel Gorman0e1cc952015-06-30 14:57:27 -07001819 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1820 }
1821
1822 /* Block until all are initialised */
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001823 wait_for_completion(&pgdat_init_all_done_comp);
Mel Gorman4248b0d2015-08-06 15:46:20 -07001824
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07001825 /*
1826 * We initialized the rest of the deferred pages. Permanently disable
1827 * on-demand struct page initialization.
1828 */
1829 static_branch_disable(&deferred_pages);
1830
Mel Gorman4248b0d2015-08-06 15:46:20 -07001831 /* Reinit limits that are based on free pages after the kernel is up */
1832 files_maxfiles_init();
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001833#endif
Pavel Tatashin3010f872017-08-18 15:16:05 -07001834#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
1835 /* Discard memblock private memory */
1836 memblock_discard();
1837#endif
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001838
1839 for_each_populated_zone(zone)
1840 set_zone_contiguous(zone);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001841}
Mel Gorman7e18adb2015-06-30 14:57:05 -07001842
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001843#ifdef CONFIG_CMA
Li Zhong9cf510a2013-08-23 13:52:52 +08001844/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001845void __init init_cma_reserved_pageblock(struct page *page)
1846{
1847 unsigned i = pageblock_nr_pages;
1848 struct page *p = page;
1849
1850 do {
1851 __ClearPageReserved(p);
1852 set_page_count(p, 0);
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09001853 } while (++p, --i);
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001854
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001855 set_pageblock_migratetype(page, MIGRATE_CMA);
Michal Nazarewiczdc783272014-07-02 15:22:35 -07001856
1857 if (pageblock_order >= MAX_ORDER) {
1858 i = pageblock_nr_pages;
1859 p = page;
1860 do {
1861 set_page_refcounted(p);
1862 __free_pages(p, MAX_ORDER - 1);
1863 p += MAX_ORDER_NR_PAGES;
1864 } while (i -= MAX_ORDER_NR_PAGES);
1865 } else {
1866 set_page_refcounted(page);
1867 __free_pages(page, pageblock_order);
1868 }
1869
Jiang Liu3dcc0572013-07-03 15:03:21 -07001870 adjust_managed_page_count(page, pageblock_nr_pages);
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001871}
1872#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873
1874/*
1875 * The order of subdivision here is critical for the IO subsystem.
1876 * Please do not alter this order without good reasons and regression
1877 * testing. Specifically, as large blocks of memory are subdivided,
1878 * the order in which smaller blocks are delivered depends on the order
1879 * they're subdivided in this function. This is the primary factor
1880 * influencing the order in which pages are delivered to the IO
1881 * subsystem according to empirical testing, and this is also justified
1882 * by considering the behavior of a buddy system containing a single
1883 * large block of memory acted on by a series of small allocations.
1884 * This behavior is a critical factor in sglist merging's success.
1885 *
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +01001886 * -- nyc
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 */
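/*
 * Worked example: an order-3 request satisfied from an order-5 free block
 * first returns the upper order-4 half to the order-4 free list, then the
 * upper order-3 quarter of the remainder to the order-3 free list, and
 * hands the lowest order-3 piece to the caller.
 */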
Nick Piggin085cc7d52006-01-06 00:11:01 -08001888static inline void expand(struct zone *zone, struct page *page,
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001889 int low, int high, struct free_area *area,
1890 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891{
1892 unsigned long size = 1 << high;
1893
1894 while (high > low) {
1895 area--;
1896 high--;
1897 size >>= 1;
Sasha Levin309381fea2014-01-23 15:52:54 -08001898 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08001899
Joonsoo Kimacbc15a2016-10-07 16:58:15 -07001900 /*
1901		 * Mark as guard pages (or page); this allows them to be
1902		 * merged back into the allocator when the buddy is freed.
1903		 * Corresponding page table entries will not be touched, so
1904		 * the pages stay not present in the virtual address space.
1905 */
1906 if (set_page_guard(zone, &page[size], high, migratetype))
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08001907 continue;
Joonsoo Kimacbc15a2016-10-07 16:58:15 -07001908
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001909 list_add(&page[size].lru, &area->free_list[migratetype]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910 area->nr_free++;
1911 set_page_order(&page[size], high);
1912 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913}
1914
Vlastimil Babka4e611802016-05-19 17:14:41 -07001915static void check_new_page_bad(struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916{
Vlastimil Babka4e611802016-05-19 17:14:41 -07001917 const char *bad_reason = NULL;
1918 unsigned long bad_flags = 0;
Dave Hansenf0b791a2014-01-23 15:52:49 -08001919
Kirill A. Shutemov53f92632016-01-15 16:53:42 -08001920 if (unlikely(atomic_read(&page->_mapcount) != -1))
Dave Hansenf0b791a2014-01-23 15:52:49 -08001921 bad_reason = "nonzero mapcount";
1922 if (unlikely(page->mapping != NULL))
1923 bad_reason = "non-NULL mapping";
Joonsoo Kimfe896d12016-03-17 14:19:26 -07001924 if (unlikely(page_ref_count(page) != 0))
Dave Hansenf0b791a2014-01-23 15:52:49 -08001925 bad_reason = "nonzero _count";
Naoya Horiguchif4c18e62015-08-06 15:47:08 -07001926 if (unlikely(page->flags & __PG_HWPOISON)) {
1927 bad_reason = "HWPoisoned (hardware-corrupted)";
1928 bad_flags = __PG_HWPOISON;
Naoya Horiguchie570f562016-05-20 16:58:50 -07001929 /* Don't complain about hwpoisoned pages */
1930 page_mapcount_reset(page); /* remove PageBuddy */
1931 return;
Naoya Horiguchif4c18e62015-08-06 15:47:08 -07001932 }
Dave Hansenf0b791a2014-01-23 15:52:49 -08001933 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1934 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1935 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1936 }
Johannes Weiner9edad6e2014-12-10 15:44:58 -08001937#ifdef CONFIG_MEMCG
1938 if (unlikely(page->mem_cgroup))
1939 bad_reason = "page still charged to cgroup";
1940#endif
Vlastimil Babka4e611802016-05-19 17:14:41 -07001941 bad_page(page, bad_reason, bad_flags);
1942}
1943
1944/*
1945 * This page is about to be returned from the page allocator
1946 */
1947static inline int check_new_page(struct page *page)
1948{
1949 if (likely(page_expected_state(page,
1950 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1951 return 0;
1952
1953 check_new_page_bad(page);
1954 return 1;
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001955}
1956
Vinayak Menonbd33ef32017-05-03 14:54:42 -07001957static inline bool free_pages_prezeroed(void)
Laura Abbott1414c7f2016-03-15 14:56:30 -07001958{
1959 return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
Vinayak Menonbd33ef32017-05-03 14:54:42 -07001960 page_poisoning_enabled();
Laura Abbott1414c7f2016-03-15 14:56:30 -07001961}
1962
Mel Gorman479f8542016-05-19 17:14:35 -07001963#ifdef CONFIG_DEBUG_VM
1964static bool check_pcp_refill(struct page *page)
1965{
1966 return false;
1967}
1968
1969static bool check_new_pcp(struct page *page)
1970{
1971 return check_new_page(page);
1972}
1973#else
1974static bool check_pcp_refill(struct page *page)
1975{
1976 return check_new_page(page);
1977}
1978static bool check_new_pcp(struct page *page)
1979{
1980 return false;
1981}
1982#endif /* CONFIG_DEBUG_VM */
1983
1984static bool check_new_pages(struct page *page, unsigned int order)
1985{
1986 int i;
1987 for (i = 0; i < (1 << order); i++) {
1988 struct page *p = page + i;
1989
1990 if (unlikely(check_new_page(p)))
1991 return true;
1992 }
1993
1994 return false;
1995}
1996
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07001997inline void post_alloc_hook(struct page *page, unsigned int order,
1998 gfp_t gfp_flags)
1999{
2000 set_page_private(page, 0);
2001 set_page_refcounted(page);
2002
2003 arch_alloc_page(page, order);
2004 kernel_map_pages(page, 1 << order, 1);
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07002005 kasan_alloc_pages(page, order);
Qian Cai41179922019-03-05 15:41:24 -08002006 kernel_poison_pages(page, 1 << order, 1);
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07002007 set_page_owner(page, order, gfp_flags);
2008}
2009
Mel Gorman479f8542016-05-19 17:14:35 -07002010static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
Mel Gormanc6038442016-05-19 17:13:38 -07002011 unsigned int alloc_flags)
Wu Fengguang2a7684a2009-09-16 11:50:12 +02002012{
2013 int i;
Hugh Dickins689bceb2005-11-21 21:32:20 -08002014
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07002015 post_alloc_hook(page, order, gfp_flags);
Nick Piggin17cf4402006-03-22 00:08:41 -08002016
Vinayak Menonbd33ef32017-05-03 14:54:42 -07002017 if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
Anisse Astierf4d28972015-06-24 16:56:36 -07002018 for (i = 0; i < (1 << order); i++)
2019 clear_highpage(page + i);
Nick Piggin17cf4402006-03-22 00:08:41 -08002020
2021 if (order && (gfp_flags & __GFP_COMP))
2022 prep_compound_page(page, order);
2023
Vlastimil Babka75379192015-02-11 15:25:38 -08002024 /*
Michal Hocko2f064f32015-08-21 14:11:51 -07002025 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
Vlastimil Babka75379192015-02-11 15:25:38 -08002026 * allocate the page. The expectation is that the caller is taking
2027 * steps that will free more memory. The caller should avoid the page
2028 * being used for !PFMEMALLOC purposes.
2029 */
Michal Hocko2f064f32015-08-21 14:11:51 -07002030 if (alloc_flags & ALLOC_NO_WATERMARKS)
2031 set_page_pfmemalloc(page);
2032 else
2033 clear_page_pfmemalloc(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034}
2035
Mel Gorman56fd56b2007-10-16 01:25:58 -07002036/*
2037 * Go through the free lists for the given migratetype and remove
2038 * the smallest available page from the freelists
2039 */
Aaron Lu85ccc8f2017-11-15 17:36:53 -08002040static __always_inline
Mel Gorman728ec982009-06-16 15:32:04 -07002041struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
Mel Gorman56fd56b2007-10-16 01:25:58 -07002042 int migratetype)
2043{
2044 unsigned int current_order;
Pintu Kumarb8af2942013-09-11 14:20:34 -07002045 struct free_area *area;
Mel Gorman56fd56b2007-10-16 01:25:58 -07002046 struct page *page;
2047
2048 /* Find a page of the appropriate size in the preferred list */
2049 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2050 area = &(zone->free_area[current_order]);
Geliang Tanga16601c2016-01-14 15:20:30 -08002051 page = list_first_entry_or_null(&area->free_list[migratetype],
Mel Gorman56fd56b2007-10-16 01:25:58 -07002052 struct page, lru);
Geliang Tanga16601c2016-01-14 15:20:30 -08002053 if (!page)
2054 continue;
Mel Gorman56fd56b2007-10-16 01:25:58 -07002055 list_del(&page->lru);
2056 rmv_page_order(page);
2057 area->nr_free--;
Mel Gorman56fd56b2007-10-16 01:25:58 -07002058 expand(zone, page, order, current_order, area, migratetype);
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002059 set_pcppage_migratetype(page, migratetype);
Mel Gorman56fd56b2007-10-16 01:25:58 -07002060 return page;
2061 }
2062
2063 return NULL;
2064}
2065
2066
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002067/*
2068 * This array describes the order in which free lists are fallen back to
2069 * when the free lists for the desired migratetype are depleted
2070 */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002071static int fallbacks[MIGRATE_TYPES][4] = {
Mel Gorman974a7862015-11-06 16:28:34 -08002072 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
Mel Gorman974a7862015-11-06 16:28:34 -08002073 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
Huang Shijie7ead3342018-12-28 00:34:46 -08002074 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
Joonsoo Kimdc676472015-04-14 15:45:15 -07002075#ifdef CONFIG_CMA
Mel Gorman974a7862015-11-06 16:28:34 -08002076 [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002077#endif
Minchan Kim194159f2013-02-22 16:33:58 -08002078#ifdef CONFIG_MEMORY_ISOLATION
Mel Gorman974a7862015-11-06 16:28:34 -08002079 [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */
Minchan Kim194159f2013-02-22 16:33:58 -08002080#endif
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002081};
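/*
 * For example, a MIGRATE_UNMOVABLE request whose own free lists are empty
 * tries MIGRATE_RECLAIMABLE first, then MIGRATE_MOVABLE; the trailing
 * MIGRATE_TYPES entry terminates the walk in find_suitable_fallback().
 */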
2082
Joonsoo Kimdc676472015-04-14 15:45:15 -07002083#ifdef CONFIG_CMA
Aaron Lu85ccc8f2017-11-15 17:36:53 -08002084static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
Joonsoo Kimdc676472015-04-14 15:45:15 -07002085 unsigned int order)
2086{
2087 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2088}
2089#else
2090static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2091 unsigned int order) { return NULL; }
2092#endif
2093
Mel Gormanc361be52007-10-16 01:25:51 -07002094/*
2095 * Move the free pages in a range to the free lists of the requested type.
Mel Gormand9c23402007-10-16 01:26:01 -07002096 * Note that start_page and end_page are not aligned on a pageblock
Mel Gormanc361be52007-10-16 01:25:51 -07002097 * boundary. If alignment is required, use move_freepages_block()
2098 */
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002099static int move_freepages(struct zone *zone,
Adrian Bunkb69a7282008-07-23 21:28:12 -07002100 struct page *start_page, struct page *end_page,
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002101 int migratetype, int *num_movable)
Mel Gormanc361be52007-10-16 01:25:51 -07002102{
2103 struct page *page;
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08002104 unsigned int order;
Mel Gormand1003132007-10-16 01:26:00 -07002105 int pages_moved = 0;
Mel Gormanc361be52007-10-16 01:25:51 -07002106
2107#ifndef CONFIG_HOLES_IN_ZONE
2108 /*
2109 * page_zone is not safe to call in this context when
2110 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
2111 * anyway as we check zone boundaries in move_freepages_block().
2112 * Remove at a later date when no bug reports exist related to
Mel Gormanac0e5b72007-10-16 01:25:58 -07002113 * grouping pages by mobility
Mel Gormanc361be52007-10-16 01:25:51 -07002114 */
Ard Biesheuvel3e040402018-03-14 19:29:37 +00002115 VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) &&
2116 pfn_valid(page_to_pfn(end_page)) &&
2117 page_zone(start_page) != page_zone(end_page));
Mel Gormanc361be52007-10-16 01:25:51 -07002118#endif
Mel Gormanc361be52007-10-16 01:25:51 -07002119 for (page = start_page; page <= end_page;) {
2120 if (!pfn_valid_within(page_to_pfn(page))) {
2121 page++;
2122 continue;
2123 }
2124
Ard Biesheuvelf073bdc2017-01-10 16:58:00 -08002125 /* Make sure we are not inadvertently changing nodes */
2126 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2127
Mel Gormanc361be52007-10-16 01:25:51 -07002128 if (!PageBuddy(page)) {
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002129 /*
2130 * We assume that pages that could be isolated for
2131 * migration are movable. But we don't actually try
2132 * isolating, as that would be expensive.
2133 */
2134 if (num_movable &&
2135 (PageLRU(page) || __PageMovable(page)))
2136 (*num_movable)++;
2137
Mel Gormanc361be52007-10-16 01:25:51 -07002138 page++;
2139 continue;
2140 }
2141
2142 order = page_order(page);
Kirill A. Shutemov84be48d2011-03-22 16:33:41 -07002143 list_move(&page->lru,
2144 &zone->free_area[order].free_list[migratetype]);
Mel Gormanc361be52007-10-16 01:25:51 -07002145 page += 1 << order;
Mel Gormand1003132007-10-16 01:26:00 -07002146 pages_moved += 1 << order;
Mel Gormanc361be52007-10-16 01:25:51 -07002147 }
2148
Mel Gormand1003132007-10-16 01:26:00 -07002149 return pages_moved;
Mel Gormanc361be52007-10-16 01:25:51 -07002150}
2151
Minchan Kimee6f5092012-07-31 16:43:50 -07002152int move_freepages_block(struct zone *zone, struct page *page,
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002153 int migratetype, int *num_movable)
Mel Gormanc361be52007-10-16 01:25:51 -07002154{
2155 unsigned long start_pfn, end_pfn;
2156 struct page *start_page, *end_page;
2157
David Rientjes4a222122018-10-26 15:09:24 -07002158 if (num_movable)
2159 *num_movable = 0;
2160
Mel Gormanc361be52007-10-16 01:25:51 -07002161 start_pfn = page_to_pfn(page);
Mel Gormand9c23402007-10-16 01:26:01 -07002162 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
Mel Gormanc361be52007-10-16 01:25:51 -07002163 start_page = pfn_to_page(start_pfn);
Mel Gormand9c23402007-10-16 01:26:01 -07002164 end_page = start_page + pageblock_nr_pages - 1;
2165 end_pfn = start_pfn + pageblock_nr_pages - 1;
Mel Gormanc361be52007-10-16 01:25:51 -07002166
2167 /* Do not cross zone boundaries */
Cody P Schafer108bcc92013-02-22 16:35:23 -08002168 if (!zone_spans_pfn(zone, start_pfn))
Mel Gormanc361be52007-10-16 01:25:51 -07002169 start_page = page;
Cody P Schafer108bcc92013-02-22 16:35:23 -08002170 if (!zone_spans_pfn(zone, end_pfn))
Mel Gormanc361be52007-10-16 01:25:51 -07002171 return 0;
2172
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002173 return move_freepages(zone, start_page, end_page, migratetype,
2174 num_movable);
Mel Gormanc361be52007-10-16 01:25:51 -07002175}
2176
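/*
 * Retype every pageblock spanned by a high-order page. For example, a page
 * two orders above pageblock_order covers 1 << 2 == 4 pageblocks, each of
 * which gets its migratetype updated.
 */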
Mel Gorman2f66a682009-09-21 17:02:31 -07002177static void change_pageblock_range(struct page *pageblock_page,
2178 int start_order, int migratetype)
2179{
2180 int nr_pageblocks = 1 << (start_order - pageblock_order);
2181
2182 while (nr_pageblocks--) {
2183 set_pageblock_migratetype(pageblock_page, migratetype);
2184 pageblock_page += pageblock_nr_pages;
2185 }
2186}
2187
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002188/*
Vlastimil Babka9c0415e2015-02-11 15:28:21 -08002189 * When we are falling back to another migratetype during allocation, try to
2190 * steal extra free pages from the same pageblocks to satisfy further
2191 * allocations, instead of polluting multiple pageblocks.
2192 *
2193 * If we are stealing a relatively large buddy page, it is likely there will
2194 * be more free pages in the pageblock, so try to steal them all. For
2195 * reclaimable and unmovable allocations, we steal regardless of page size,
2196 * as fragmentation caused by those allocations polluting movable pageblocks
2197 * is worse than movable allocations stealing from unmovable and reclaimable
2198 * pageblocks.
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002199 */
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002200static bool can_steal_fallback(unsigned int order, int start_mt)
2201{
2202 /*
2203	 * This order check is intentional even though there is a more
2204	 * relaxed check below. The reason is that we can steal a whole
2205	 * pageblock if this condition is met, but the check below does
2206	 * not guarantee it and is only a heuristic, so it could be
2207	 * changed at any time.
2208 */
2209 if (order >= pageblock_order)
2210 return true;
2211
2212 if (order >= pageblock_order / 2 ||
2213 start_mt == MIGRATE_RECLAIMABLE ||
2214 start_mt == MIGRATE_UNMOVABLE ||
2215 page_group_by_mobility_disabled)
2216 return true;
2217
2218 return false;
2219}
2220
Mel Gorman1c308442018-12-28 00:35:52 -08002221static inline void boost_watermark(struct zone *zone)
2222{
2223 unsigned long max_boost;
2224
2225 if (!watermark_boost_factor)
2226 return;
2227
2228 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2229 watermark_boost_factor, 10000);
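	/* e.g. a watermark_boost_factor of 15000 caps the boost at 150% of WMARK_HIGH */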
Mel Gorman94b33342019-02-20 22:19:49 -08002230
2231 /*
2232 * high watermark may be uninitialised if fragmentation occurs
2233 * very early in boot so do not boost. We do not fall
2234 * through and boost by pageblock_nr_pages as failing
2235 * allocations that early means that reclaim is not going
2236 * to help and it may even be impossible to reclaim the
2237 * boosted watermark resulting in a hang.
2238 */
2239 if (!max_boost)
2240 return;
2241
Mel Gorman1c308442018-12-28 00:35:52 -08002242 max_boost = max(pageblock_nr_pages, max_boost);
2243
2244 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2245 max_boost);
2246}
2247
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002248/*
2249 * This function implements actual steal behaviour. If order is large enough,
2250 * we can steal whole pageblock. If not, we first move freepages in this
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002251 * pageblock to our migratetype and determine how many already-allocated pages
2252 * there are in the pageblock with a compatible migratetype. If at least half
2253 * of pages are free or compatible, we can change migratetype of the pageblock
2254 * itself, so pages freed in the future will be put on the correct free list.
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002255 */
2256static void steal_suitable_fallback(struct zone *zone, struct page *page,
Mel Gorman1c308442018-12-28 00:35:52 -08002257 unsigned int alloc_flags, int start_type, bool whole_block)
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002258{
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08002259 unsigned int current_order = page_order(page);
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002260 struct free_area *area;
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002261 int free_pages, movable_pages, alike_pages;
2262 int old_block_type;
2263
2264 old_block_type = get_pageblock_migratetype(page);
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002265
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002266 /*
2267 * This can happen due to races and we want to prevent broken
2268 * highatomic accounting.
2269 */
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002270 if (is_migrate_highatomic(old_block_type))
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002271 goto single_page;
2272
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002273 /* Take ownership for orders >= pageblock_order */
2274 if (current_order >= pageblock_order) {
2275 change_pageblock_range(page, current_order, start_type);
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002276 goto single_page;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002277 }
2278
Mel Gorman1c308442018-12-28 00:35:52 -08002279 /*
2280 * Boost watermarks to increase reclaim pressure to reduce the
2281 * likelihood of future fallbacks. Wake kswapd now as the node
2282 * may be balanced overall and kswapd will not wake naturally.
2283 */
2284 boost_watermark(zone);
2285 if (alloc_flags & ALLOC_KSWAPD)
Mel Gorman73444bc2019-01-08 15:23:39 -08002286 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
Mel Gorman1c308442018-12-28 00:35:52 -08002287
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002288 /* We are not allowed to try stealing from the whole block */
2289 if (!whole_block)
2290 goto single_page;
2291
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002292 free_pages = move_freepages_block(zone, page, start_type,
2293 &movable_pages);
2294 /*
2295 * Determine how many pages are compatible with our allocation.
2296 * For movable allocation, it's the number of movable pages which
2297 * we just obtained. For other types it's a bit more tricky.
2298 */
2299 if (start_type == MIGRATE_MOVABLE) {
2300 alike_pages = movable_pages;
2301 } else {
2302 /*
2303 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2304 * to MOVABLE pageblock, consider all non-movable pages as
2305 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2306 * vice versa, be conservative since we can't distinguish the
2307 * exact migratetype of non-movable pages.
2308 */
2309 if (old_block_type == MIGRATE_MOVABLE)
2310 alike_pages = pageblock_nr_pages
2311 - (free_pages + movable_pages);
2312 else
2313 alike_pages = 0;
2314 }
2315
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002316 /* moving whole block can fail due to zone boundary conditions */
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002317 if (!free_pages)
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002318 goto single_page;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002319
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002320 /*
2321 * If a sufficient number of pages in the block are either free or of
2322	 * comparable migratability to our allocation, claim the whole block.
2323 */
2324 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002325 page_group_by_mobility_disabled)
2326 set_pageblock_migratetype(page, start_type);
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002327
2328 return;
2329
2330single_page:
2331 area = &zone->free_area[current_order];
2332 list_move(&page->lru, &area->free_list[start_type]);
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002333}
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002334
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002335/*
2336 * Check whether there is a suitable fallback freepage with requested order.
2337 * If only_stealable is true, this function returns fallback_mt only if
2338 * we can steal other freepages all together. This would help to reduce
2339 * fragmentation due to mixed migratetype pages in one pageblock.
2340 */
2341int find_suitable_fallback(struct free_area *area, unsigned int order,
2342 int migratetype, bool only_stealable, bool *can_steal)
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002343{
2344 int i;
2345 int fallback_mt;
2346
2347 if (area->nr_free == 0)
2348 return -1;
2349
2350 *can_steal = false;
2351 for (i = 0;; i++) {
2352 fallback_mt = fallbacks[migratetype][i];
Mel Gorman974a7862015-11-06 16:28:34 -08002353 if (fallback_mt == MIGRATE_TYPES)
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002354 break;
2355
2356 if (list_empty(&area->free_list[fallback_mt]))
2357 continue;
2358
2359 if (can_steal_fallback(order, migratetype))
2360 *can_steal = true;
2361
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002362 if (!only_stealable)
2363 return fallback_mt;
2364
2365 if (*can_steal)
2366 return fallback_mt;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002367 }
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002368
2369 return -1;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002370}
2371
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002372/*
2373 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2374 * there are no empty page blocks that contain a page with a suitable order
2375 */
2376static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2377 unsigned int alloc_order)
2378{
2379 int mt;
2380 unsigned long max_managed, flags;
2381
2382 /*
2383 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2384 * Check is race-prone but harmless.
2385 */
Arun KS9705bea2018-12-28 00:34:24 -08002386 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002387 if (zone->nr_reserved_highatomic >= max_managed)
2388 return;
2389
2390 spin_lock_irqsave(&zone->lock, flags);
2391
2392 /* Recheck the nr_reserved_highatomic limit under the lock */
2393 if (zone->nr_reserved_highatomic >= max_managed)
2394 goto out_unlock;
2395
2396 /* Yoink! */
2397 mt = get_pageblock_migratetype(page);
Xishi Qiua6ffdc02017-05-03 14:52:52 -07002398 if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2399 && !is_migrate_cma(mt)) {
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002400 zone->nr_reserved_highatomic += pageblock_nr_pages;
2401 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002402 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002403 }
2404
2405out_unlock:
2406 spin_unlock_irqrestore(&zone->lock, flags);
2407}
2408
2409/*
2410 * Used when an allocation is about to fail under memory pressure. This
2411 * potentially hurts the reliability of high-order allocations when under
2412 * intense memory pressure but failed atomic allocations should be easier
2413 * to recover from than an OOM.
Minchan Kim29fac032016-12-12 16:42:14 -08002414 *
2415 * If @force is true, try to unreserve a pageblock even though highatomic
2416 * pageblock is exhausted.
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002417 */
Minchan Kim29fac032016-12-12 16:42:14 -08002418static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2419 bool force)
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002420{
2421 struct zonelist *zonelist = ac->zonelist;
2422 unsigned long flags;
2423 struct zoneref *z;
2424 struct zone *zone;
2425 struct page *page;
2426 int order;
Minchan Kim04c87162016-12-12 16:42:11 -08002427 bool ret;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002428
2429 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2430 ac->nodemask) {
Minchan Kim29fac032016-12-12 16:42:14 -08002431 /*
2432 * Preserve at least one pageblock unless memory pressure
2433 * is really high.
2434 */
2435 if (!force && zone->nr_reserved_highatomic <=
2436 pageblock_nr_pages)
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002437 continue;
2438
2439 spin_lock_irqsave(&zone->lock, flags);
2440 for (order = 0; order < MAX_ORDER; order++) {
2441 struct free_area *area = &(zone->free_area[order]);
2442
Geliang Tanga16601c2016-01-14 15:20:30 -08002443 page = list_first_entry_or_null(
2444 &area->free_list[MIGRATE_HIGHATOMIC],
2445 struct page, lru);
2446 if (!page)
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002447 continue;
2448
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002449 /*
Minchan Kim4855e4a2016-12-12 16:42:08 -08002450			 * In the page freeing path, migratetype changes are racy so
2451			 * we can encounter several free pages in a pageblock
2452			 * in this loop although we changed the pageblock type
2453			 * from highatomic to ac->migratetype. So we should
2454			 * adjust the count only once.
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002455 */
Xishi Qiua6ffdc02017-05-03 14:52:52 -07002456 if (is_migrate_highatomic_page(page)) {
Minchan Kim4855e4a2016-12-12 16:42:08 -08002457 /*
2458 * It should never happen but changes to
2459 * locking could inadvertently allow a per-cpu
2460 * drain to add pages to MIGRATE_HIGHATOMIC
2461 * while unreserving so be safe and watch for
2462 * underflows.
2463 */
2464 zone->nr_reserved_highatomic -= min(
2465 pageblock_nr_pages,
2466 zone->nr_reserved_highatomic);
2467 }
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002468
2469 /*
2470 * Convert to ac->migratetype and avoid the normal
2471 * pageblock stealing heuristics. Minimally, the caller
2472 * is doing the work and needs the pages. More
2473 * importantly, if the block was always converted to
2474 * MIGRATE_UNMOVABLE or another type then the number
2475 * of pageblocks that cannot be completely freed
2476 * may increase.
2477 */
2478 set_pageblock_migratetype(page, ac->migratetype);
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002479 ret = move_freepages_block(zone, page, ac->migratetype,
2480 NULL);
Minchan Kim29fac032016-12-12 16:42:14 -08002481 if (ret) {
2482 spin_unlock_irqrestore(&zone->lock, flags);
2483 return ret;
2484 }
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002485 }
2486 spin_unlock_irqrestore(&zone->lock, flags);
2487 }
Minchan Kim04c87162016-12-12 16:42:11 -08002488
2489 return false;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002490}
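
/*
 * Usage note, following the loop above: a non-forced call skips any zone
 * whose highatomic reserve is already down to a single pageblock, while a
 * forced call may drain the reserve completely. The return value reports
 * whether any free pages were actually moved back to ac->migratetype, i.e.
 * whether retrying the allocation can make progress.
 */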
2491
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002492/*
2493 * Try finding a free buddy page on the fallback list and put it on the free
2494 * list of requested migratetype, possibly along with other pages from the same
2495 * block, depending on fragmentation avoidance heuristics. Returns true if
2496 * fallback was found so that __rmqueue_smallest() can grab it.
Rasmus Villemoesb0025292017-07-10 15:49:26 -07002497 *
2498 * The use of signed ints for order and current_order is a deliberate
2499 * deviation from the rest of this file, to make the for loop
2500 * condition simpler.
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002501 */
Aaron Lu85ccc8f2017-11-15 17:36:53 -08002502static __always_inline bool
Mel Gorman6bb15452018-12-28 00:35:41 -08002503__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2504 unsigned int alloc_flags)
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002505{
Pintu Kumarb8af2942013-09-11 14:20:34 -07002506 struct free_area *area;
Rasmus Villemoesb0025292017-07-10 15:49:26 -07002507 int current_order;
Mel Gorman6bb15452018-12-28 00:35:41 -08002508 int min_order = order;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002509 struct page *page;
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002510 int fallback_mt;
2511 bool can_steal;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002512
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002513 /*
Mel Gorman6bb15452018-12-28 00:35:41 -08002514 * Do not steal pages from freelists belonging to other pageblocks
2515 * i.e. orders < pageblock_order. If there are no local zones free,
2516 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2517 */
2518 if (alloc_flags & ALLOC_NOFRAGMENT)
2519 min_order = pageblock_order;
2520
2521 /*
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002522 * Find the largest available free page in the other list. This roughly
2523 * approximates finding the pageblock with the most free pages, which
2524 * would be too costly to do exactly.
2525 */
Mel Gorman6bb15452018-12-28 00:35:41 -08002526 for (current_order = MAX_ORDER - 1; current_order >= min_order;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002527 --current_order) {
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002528 area = &(zone->free_area[current_order]);
2529 fallback_mt = find_suitable_fallback(area, current_order,
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002530 start_migratetype, false, &can_steal);
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002531 if (fallback_mt == -1)
2532 continue;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002533
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002534 /*
2535		 * If we cannot steal all free pages from the pageblock and the
2536		 * requested migratetype is movable, it's better to
2537 * steal and split the smallest available page instead of the
2538 * largest available page, because even if the next movable
2539 * allocation falls back into a different pageblock than this
2540 * one, it won't cause permanent fragmentation.
2541 */
2542 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2543 && current_order > order)
2544 goto find_smallest;
Mel Gormane0104872007-10-16 01:25:53 -07002545
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002546 goto do_steal;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002547 }
2548
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002549 return false;
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002550
2551find_smallest:
2552 for (current_order = order; current_order < MAX_ORDER;
2553 current_order++) {
2554 area = &(zone->free_area[current_order]);
2555 fallback_mt = find_suitable_fallback(area, current_order,
2556 start_migratetype, false, &can_steal);
2557 if (fallback_mt != -1)
2558 break;
2559 }
2560
2561 /*
2562 * This should not happen - we already found a suitable fallback
2563 * when looking for the largest page.
2564 */
2565 VM_BUG_ON(current_order == MAX_ORDER);
2566
2567do_steal:
2568 page = list_first_entry(&area->free_list[fallback_mt],
2569 struct page, lru);
2570
Mel Gorman1c308442018-12-28 00:35:52 -08002571 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2572 can_steal);
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002573
2574 trace_mm_page_alloc_extfrag(page, order, current_order,
2575 start_migratetype, fallback_mt);
2576
2577 return true;
2578
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002579}
2580
Mel Gorman56fd56b2007-10-16 01:25:58 -07002581/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 * Do the hard work of removing an element from the buddy allocator.
2583 * Call me with the zone->lock already held.
2584 */
Aaron Lu85ccc8f2017-11-15 17:36:53 -08002585static __always_inline struct page *
Mel Gorman6bb15452018-12-28 00:35:41 -08002586__rmqueue(struct zone *zone, unsigned int order, int migratetype,
2587 unsigned int alloc_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589 struct page *page;
2590
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002591retry:
Mel Gorman56fd56b2007-10-16 01:25:58 -07002592 page = __rmqueue_smallest(zone, order, migratetype);
Mel Gorman974a7862015-11-06 16:28:34 -08002593 if (unlikely(!page)) {
Joonsoo Kimdc676472015-04-14 15:45:15 -07002594 if (migratetype == MIGRATE_MOVABLE)
2595 page = __rmqueue_cma_fallback(zone, order);
2596
Mel Gorman6bb15452018-12-28 00:35:41 -08002597 if (!page && __rmqueue_fallback(zone, order, migratetype,
2598 alloc_flags))
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002599 goto retry;
Mel Gorman728ec982009-06-16 15:32:04 -07002600 }
2601
Mel Gorman0d3d0622009-09-21 17:02:44 -07002602 trace_mm_page_alloc_zone_locked(page, order, migratetype);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002603 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604}
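
/*
 * Summary of the order tried above: the requested migratetype via
 * __rmqueue_smallest() first, then the CMA fallback for MIGRATE_MOVABLE
 * requests, and finally __rmqueue_fallback(); a successful fallback loops
 * back so that the stolen pages are handed out by __rmqueue_smallest()
 * again.
 */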
2605
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01002606/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 * Obtain a specified number of elements from the buddy allocator, all under
2608 * a single hold of the lock, for efficiency. Add them to the supplied list.
2609 * Returns the number of new pages which were placed at *list.
2610 */
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01002611static int rmqueue_bulk(struct zone *zone, unsigned int order,
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002612 unsigned long count, struct list_head *list,
Mel Gorman6bb15452018-12-28 00:35:41 -08002613 int migratetype, unsigned int alloc_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614{
Mel Gormana6de7342016-12-12 16:44:41 -08002615 int i, alloced = 0;
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01002616
Mel Gormand34b0732017-04-20 14:37:43 -07002617 spin_lock(&zone->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618 for (i = 0; i < count; ++i) {
Mel Gorman6bb15452018-12-28 00:35:41 -08002619 struct page *page = __rmqueue(zone, order, migratetype,
2620 alloc_flags);
Nick Piggin085cc7d52006-01-06 00:11:01 -08002621 if (unlikely(page == NULL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622 break;
Mel Gorman81eabcb2007-12-17 16:20:05 -08002623
Mel Gorman479f8542016-05-19 17:14:35 -07002624 if (unlikely(check_pcp_refill(page)))
2625 continue;
2626
Mel Gorman81eabcb2007-12-17 16:20:05 -08002627 /*
Vlastimil Babka0fac3ba2017-11-15 17:38:07 -08002628		 * Split buddy pages returned by expand() are received here in
2629		 * physical page order. Each page is added to the tail of the
2630		 * caller's list. From the caller's perspective, the linked list
2631		 * is then ordered by page number under some conditions. This is
2632		 * useful for IO devices that process requests in forward
2633		 * direction from the head, thus also in physical page order,
2634		 * and for IO devices that can merge IO requests if the physical
2635		 * pages are ordered properly.
Mel Gorman81eabcb2007-12-17 16:20:05 -08002636 */
Vlastimil Babka0fac3ba2017-11-15 17:38:07 -08002637 list_add_tail(&page->lru, list);
Mel Gormana6de7342016-12-12 16:44:41 -08002638 alloced++;
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002639 if (is_migrate_cma(get_pcppage_migratetype(page)))
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07002640 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2641 -(1 << order));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642 }
Mel Gormana6de7342016-12-12 16:44:41 -08002643
2644 /*
2645 * i pages were removed from the buddy list even if some leak due
2646 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
2647 * on i. Do not confuse with 'alloced' which is the number of
2648 * pages added to the pcp list.
2649 */
Mel Gormanf2260e62009-06-16 15:32:13 -07002650 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
Mel Gormand34b0732017-04-20 14:37:43 -07002651 spin_unlock(&zone->lock);
Mel Gormana6de7342016-12-12 16:44:41 -08002652 return alloced;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653}
2654
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002655#ifdef CONFIG_NUMA
Christoph Lameter8fce4d82006-03-09 17:33:54 -08002656/*
Christoph Lameter4037d452007-05-09 02:35:14 -07002657 * Called from the vmstat counter updater to drain pagesets of this
2658 * currently executing processor on remote nodes after they have
2659 * expired.
2660 *
Christoph Lameter879336c2006-03-22 00:09:08 -08002661 * Note that this function must be called with the thread pinned to
2662 * a single processor.
Christoph Lameter8fce4d82006-03-09 17:33:54 -08002663 */
Christoph Lameter4037d452007-05-09 02:35:14 -07002664void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002665{
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002666 unsigned long flags;
Michal Nazarewicz7be12fc2014-08-06 16:05:15 -07002667 int to_drain, batch;
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002668
Christoph Lameter4037d452007-05-09 02:35:14 -07002669 local_irq_save(flags);
Jason Low4db0c3c2015-04-15 16:14:08 -07002670 batch = READ_ONCE(pcp->batch);
Michal Nazarewicz7be12fc2014-08-06 16:05:15 -07002671 to_drain = min(pcp->count, batch);
Aaron Lu77ba9062018-04-05 16:24:06 -07002672 if (to_drain > 0)
KOSAKI Motohiro2a135152012-07-31 16:42:53 -07002673 free_pcppages_bulk(zone, to_drain, pcp);
Christoph Lameter4037d452007-05-09 02:35:14 -07002674 local_irq_restore(flags);
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002675}
2676#endif
2677
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002678/*
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002679 * Drain pcplists of the indicated processor and zone.
2680 *
2681 * The processor must either be the current processor and the
2682 * thread pinned to the current processor or a processor that
2683 * is not online.
2684 */
2685static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2686{
2687 unsigned long flags;
2688 struct per_cpu_pageset *pset;
2689 struct per_cpu_pages *pcp;
2690
2691 local_irq_save(flags);
2692 pset = per_cpu_ptr(zone->pageset, cpu);
2693
2694 pcp = &pset->pcp;
Aaron Lu77ba9062018-04-05 16:24:06 -07002695 if (pcp->count)
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002696 free_pcppages_bulk(zone, pcp->count, pcp);
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002697 local_irq_restore(flags);
2698}
2699
2700/*
2701 * Drain pcplists of all zones on the indicated processor.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002702 *
2703 * The processor must either be the current processor and the
2704 * thread pinned to the current processor or a processor that
2705 * is not online.
2706 */
2707static void drain_pages(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708{
2709 struct zone *zone;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07002711 for_each_populated_zone(zone) {
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002712 drain_pages_zone(cpu, zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713 }
2714}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002716/*
2717 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002718 *
2719 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
2720 * the single zone's pages.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002721 */
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002722void drain_local_pages(struct zone *zone)
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002723{
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002724 int cpu = smp_processor_id();
2725
2726 if (zone)
2727 drain_pages_zone(cpu, zone);
2728 else
2729 drain_pages(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002730}
2731
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002732static void drain_local_pages_wq(struct work_struct *work)
2733{
Wei Yangd9367bd2018-12-28 00:38:58 -08002734 struct pcpu_drain *drain;
2735
2736 drain = container_of(work, struct pcpu_drain, work);
2737
Michal Hockoa459eeb2017-02-24 14:56:35 -08002738 /*
2739 * drain_all_pages doesn't use proper cpu hotplug protection so
2740 * we can race with cpu offline when the WQ can move this from
2741 * a cpu pinned worker to an unbound one. We can operate on a different
2742	 * cpu, which is all right, but we also have to make sure not to move to
2743 * a different one.
2744 */
2745 preempt_disable();
Wei Yangd9367bd2018-12-28 00:38:58 -08002746 drain_local_pages(drain->zone);
Michal Hockoa459eeb2017-02-24 14:56:35 -08002747 preempt_enable();
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002748}
2749
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002750/*
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002751 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2752 *
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002753 * When zone parameter is non-NULL, spill just the single zone's pages.
2754 *
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002755 * Note that this can be extremely slow as the draining happens in a workqueue.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002756 */
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002757void drain_all_pages(struct zone *zone)
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002758{
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002759 int cpu;
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002760
2761 /*
2762	 * Allocate in the BSS so we won't require allocation in
2763 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2764 */
2765 static cpumask_t cpus_with_pcps;
2766
Michal Hockoce612872017-04-07 16:05:05 -07002767 /*
2768 * Make sure nobody triggers this path before mm_percpu_wq is fully
2769 * initialized.
2770 */
2771 if (WARN_ON_ONCE(!mm_percpu_wq))
2772 return;
2773
Mel Gormanbd233f52017-02-24 14:56:56 -08002774 /*
2775 * Do not drain if one is already in progress unless it's specific to
2776 * a zone. Such callers are primarily CMA and memory hotplug and need
2777 * the drain to be complete when the call returns.
2778 */
2779 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2780 if (!zone)
2781 return;
2782 mutex_lock(&pcpu_drain_mutex);
2783 }
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002784
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002785 /*
2786 * We don't care about racing with CPU hotplug event
2787 * as offline notification will cause the notified
2788 * cpu to drain that CPU pcps and on_each_cpu_mask
2789 * disables preemption as part of its processing
2790 */
2791 for_each_online_cpu(cpu) {
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002792 struct per_cpu_pageset *pcp;
2793 struct zone *z;
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002794 bool has_pcps = false;
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002795
2796 if (zone) {
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002797 pcp = per_cpu_ptr(zone->pageset, cpu);
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002798 if (pcp->pcp.count)
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002799 has_pcps = true;
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002800 } else {
2801 for_each_populated_zone(z) {
2802 pcp = per_cpu_ptr(z->pageset, cpu);
2803 if (pcp->pcp.count) {
2804 has_pcps = true;
2805 break;
2806 }
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002807 }
2808 }
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002809
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002810 if (has_pcps)
2811 cpumask_set_cpu(cpu, &cpus_with_pcps);
2812 else
2813 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2814 }
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002815
Mel Gormanbd233f52017-02-24 14:56:56 -08002816 for_each_cpu(cpu, &cpus_with_pcps) {
Wei Yangd9367bd2018-12-28 00:38:58 -08002817 struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
2818
2819 drain->zone = zone;
2820 INIT_WORK(&drain->work, drain_local_pages_wq);
2821 queue_work_on(cpu, mm_percpu_wq, &drain->work);
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002822 }
Mel Gormanbd233f52017-02-24 14:56:56 -08002823 for_each_cpu(cpu, &cpus_with_pcps)
Wei Yangd9367bd2018-12-28 00:38:58 -08002824 flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
Mel Gormanbd233f52017-02-24 14:56:56 -08002825
2826 mutex_unlock(&pcpu_drain_mutex);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002827}
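
/*
 * Usage sketch: drain_all_pages(NULL) spills every online CPU's pcp pages
 * for all populated zones, while drain_all_pages(zone) restricts the drain
 * to a single zone, which is what the CMA and memory hotplug callers
 * mentioned above typically want.
 */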
2828
Rafael J. Wysocki296699d2007-07-29 23:27:18 +02002829#ifdef CONFIG_HIBERNATION
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830
Chen Yu556b9692017-08-25 15:55:30 -07002831/*
2832 * Touch the watchdog for every WD_PAGE_COUNT pages.
2833 */
2834#define WD_PAGE_COUNT (128*1024)
2835
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836void mark_free_pages(struct zone *zone)
2837{
Chen Yu556b9692017-08-25 15:55:30 -07002838 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002839 unsigned long flags;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002840 unsigned int order, t;
Geliang Tang86760a22016-01-14 15:20:33 -08002841 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842
Xishi Qiu8080fc02013-09-11 14:21:45 -07002843 if (zone_is_empty(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 return;
2845
2846 spin_lock_irqsave(&zone->lock, flags);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002847
Cody P Schafer108bcc92013-02-22 16:35:23 -08002848 max_zone_pfn = zone_end_pfn(zone);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002849 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2850 if (pfn_valid(pfn)) {
Geliang Tang86760a22016-01-14 15:20:33 -08002851 page = pfn_to_page(pfn);
Joonsoo Kimba6b0972016-05-19 17:12:16 -07002852
Chen Yu556b9692017-08-25 15:55:30 -07002853 if (!--page_count) {
2854 touch_nmi_watchdog();
2855 page_count = WD_PAGE_COUNT;
2856 }
2857
Joonsoo Kimba6b0972016-05-19 17:12:16 -07002858 if (page_zone(page) != zone)
2859 continue;
2860
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002861 if (!swsusp_page_is_forbidden(page))
2862 swsusp_unset_page_free(page);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002863 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002865 for_each_migratetype_order(order, t) {
Geliang Tang86760a22016-01-14 15:20:33 -08002866 list_for_each_entry(page,
2867 &zone->free_area[order].free_list[t], lru) {
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002868 unsigned long i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869
Geliang Tang86760a22016-01-14 15:20:33 -08002870 pfn = page_to_pfn(page);
Chen Yu556b9692017-08-25 15:55:30 -07002871 for (i = 0; i < (1UL << order); i++) {
2872 if (!--page_count) {
2873 touch_nmi_watchdog();
2874 page_count = WD_PAGE_COUNT;
2875 }
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002876 swsusp_set_page_free(pfn_to_page(pfn + i));
Chen Yu556b9692017-08-25 15:55:30 -07002877 }
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002878 }
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002879 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 spin_unlock_irqrestore(&zone->lock, flags);
2881}
Mel Gormane2c55dc2007-10-16 01:25:50 -07002882#endif /* CONFIG_PM */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883
Mel Gorman2d4894b2017-11-15 17:37:59 -08002884static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885{
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002886 int migratetype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887
Mel Gorman4db75482016-05-19 17:14:32 -07002888 if (!free_pcp_prepare(page))
Mel Gorman9cca35d42017-11-15 17:37:37 -08002889 return false;
Hugh Dickins689bceb2005-11-21 21:32:20 -08002890
Mel Gormandc4b0ca2014-06-04 16:10:17 -07002891 migratetype = get_pfnblock_migratetype(page, pfn);
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002892 set_pcppage_migratetype(page, migratetype);
Mel Gorman9cca35d42017-11-15 17:37:37 -08002893 return true;
2894}
2895
Mel Gorman2d4894b2017-11-15 17:37:59 -08002896static void free_unref_page_commit(struct page *page, unsigned long pfn)
Mel Gorman9cca35d42017-11-15 17:37:37 -08002897{
2898 struct zone *zone = page_zone(page);
2899 struct per_cpu_pages *pcp;
2900 int migratetype;
2901
2902 migratetype = get_pcppage_migratetype(page);
Mel Gormand34b0732017-04-20 14:37:43 -07002903 __count_vm_event(PGFREE);
Mel Gormanda456f12009-06-16 15:32:08 -07002904
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002905 /*
2906 * We only track unmovable, reclaimable and movable on pcp lists.
2907 * Free ISOLATE pages back to the allocator because they are being
Xishi Qiua6ffdc02017-05-03 14:52:52 -07002908 * offlined but treat HIGHATOMIC as movable pages so we can get those
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002909 * areas back if necessary. Otherwise, we may have to free
2910 * excessively into the page allocator
2911 */
2912 if (migratetype >= MIGRATE_PCPTYPES) {
Minchan Kim194159f2013-02-22 16:33:58 -08002913 if (unlikely(is_migrate_isolate(migratetype))) {
Mel Gormandc4b0ca2014-06-04 16:10:17 -07002914 free_one_page(zone, page, pfn, 0, migratetype);
Mel Gorman9cca35d42017-11-15 17:37:37 -08002915 return;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002916 }
2917 migratetype = MIGRATE_MOVABLE;
2918 }
2919
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09002920 pcp = &this_cpu_ptr(zone->pageset)->pcp;
Mel Gorman2d4894b2017-11-15 17:37:59 -08002921 list_add(&page->lru, &pcp->lists[migratetype]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922 pcp->count++;
Nick Piggin48db57f2006-01-08 01:00:42 -08002923 if (pcp->count >= pcp->high) {
Jason Low4db0c3c2015-04-15 16:14:08 -07002924 unsigned long batch = READ_ONCE(pcp->batch);
Cody P Schafer998d39cb2013-07-03 15:01:32 -07002925 free_pcppages_bulk(zone, batch, pcp);
Nick Piggin48db57f2006-01-08 01:00:42 -08002926 }
Mel Gorman9cca35d42017-11-15 17:37:37 -08002927}
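
/*
 * Illustrative behaviour with hypothetical tuning values: once a free
 * brings pcp->count up to pcp->high (say 186), free_pcppages_bulk()
 * returns pcp->batch pages (say 31) to the buddy lists in one go, so the
 * pcp list is trimmed in batches rather than page by page.
 */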
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002928
Mel Gorman9cca35d42017-11-15 17:37:37 -08002929/*
2930 * Free a 0-order page
Mel Gorman9cca35d42017-11-15 17:37:37 -08002931 */
Mel Gorman2d4894b2017-11-15 17:37:59 -08002932void free_unref_page(struct page *page)
Mel Gorman9cca35d42017-11-15 17:37:37 -08002933{
2934 unsigned long flags;
2935 unsigned long pfn = page_to_pfn(page);
2936
Mel Gorman2d4894b2017-11-15 17:37:59 -08002937 if (!free_unref_page_prepare(page, pfn))
Mel Gorman9cca35d42017-11-15 17:37:37 -08002938 return;
2939
2940 local_irq_save(flags);
Mel Gorman2d4894b2017-11-15 17:37:59 -08002941 free_unref_page_commit(page, pfn);
Mel Gormand34b0732017-04-20 14:37:43 -07002942 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943}
2944
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002945/*
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002946 * Free a list of 0-order pages
2947 */
Mel Gorman2d4894b2017-11-15 17:37:59 -08002948void free_unref_page_list(struct list_head *list)
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002949{
2950 struct page *page, *next;
Mel Gorman9cca35d42017-11-15 17:37:37 -08002951 unsigned long flags, pfn;
Lucas Stachc24ad772017-12-14 15:32:55 -08002952 int batch_count = 0;
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002953
Mel Gorman9cca35d42017-11-15 17:37:37 -08002954 /* Prepare pages for freeing */
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002955 list_for_each_entry_safe(page, next, list, lru) {
Mel Gorman9cca35d42017-11-15 17:37:37 -08002956 pfn = page_to_pfn(page);
Mel Gorman2d4894b2017-11-15 17:37:59 -08002957 if (!free_unref_page_prepare(page, pfn))
Mel Gorman9cca35d42017-11-15 17:37:37 -08002958 list_del(&page->lru);
2959 set_page_private(page, pfn);
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002960 }
Mel Gorman9cca35d42017-11-15 17:37:37 -08002961
2962 local_irq_save(flags);
2963 list_for_each_entry_safe(page, next, list, lru) {
2964 unsigned long pfn = page_private(page);
2965
2966 set_page_private(page, 0);
Mel Gorman2d4894b2017-11-15 17:37:59 -08002967 trace_mm_page_free_batched(page);
2968 free_unref_page_commit(page, pfn);
Lucas Stachc24ad772017-12-14 15:32:55 -08002969
2970 /*
2971 * Guard against excessive IRQ disabled times when we get
2972 * a large list of pages to free.
2973 */
2974 if (++batch_count == SWAP_CLUSTER_MAX) {
2975 local_irq_restore(flags);
2976 batch_count = 0;
2977 local_irq_save(flags);
2978 }
Mel Gorman9cca35d42017-11-15 17:37:37 -08002979 }
2980 local_irq_restore(flags);
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002981}
2982
2983/*
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002984 * split_page takes a non-compound higher-order page, and splits it into
2985 * n (1<<order) sub-pages: page[0..n]
2986 * Each sub-page must be freed individually.
2987 *
2988 * Note: this is probably too low level an operation for use in drivers.
2989 * Please consult with lkml before using this in your driver.
2990 */
2991void split_page(struct page *page, unsigned int order)
2992{
2993 int i;
2994
Sasha Levin309381fea2014-01-23 15:52:54 -08002995 VM_BUG_ON_PAGE(PageCompound(page), page);
2996 VM_BUG_ON_PAGE(!page_count(page), page);
Vegard Nossumb1eeab62008-11-25 16:55:53 +01002997
Joonsoo Kima9627bc2016-07-26 15:23:49 -07002998 for (i = 1; i < (1 << order); i++)
Nick Piggin7835e982006-03-22 00:08:40 -08002999 set_page_refcounted(page + i);
Joonsoo Kima9627bc2016-07-26 15:23:49 -07003000 split_page_owner(page, order);
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08003001}
K. Y. Srinivasan5853ff22013-03-25 15:47:38 -07003002EXPORT_SYMBOL_GPL(split_page);
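
/*
 * Usage sketch (illustrative, not taken from this file): a caller holding a
 * non-compound order-2 page could do
 *
 *	split_page(page, 2);
 *	for (i = 0; i < 4; i++)
 *		__free_page(page + i);
 *
 * after which the four sub-pages are independent order-0 pages that must be
 * freed individually.
 */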
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08003003
Joonsoo Kim3c605092014-11-13 15:19:21 -08003004int __isolate_free_page(struct page *page, unsigned int order)
Mel Gorman748446b2010-05-24 14:32:27 -07003005{
Mel Gorman748446b2010-05-24 14:32:27 -07003006 unsigned long watermark;
3007 struct zone *zone;
Bartlomiej Zolnierkiewicz2139cbe2012-10-08 16:32:00 -07003008 int mt;
Mel Gorman748446b2010-05-24 14:32:27 -07003009
3010 BUG_ON(!PageBuddy(page));
3011
3012 zone = page_zone(page);
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08003013 mt = get_pageblock_migratetype(page);
Mel Gorman748446b2010-05-24 14:32:27 -07003014
Minchan Kim194159f2013-02-22 16:33:58 -08003015 if (!is_migrate_isolate(mt)) {
Vlastimil Babka8348faf2016-10-07 16:58:00 -07003016 /*
3017 * Obey watermarks as if the page was being allocated. We can
3018 * emulate a high-order watermark check with a raised order-0
3019 * watermark, because we already know our high-order page
3020 * exists.
3021 */
Mel Gormanfd1444b2019-03-05 15:44:50 -08003022 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09003023 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08003024 return 0;
3025
Mel Gorman8fb74b92013-01-11 14:32:16 -08003026 __mod_zone_freepage_state(zone, -(1UL << order), mt);
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08003027 }
Mel Gorman748446b2010-05-24 14:32:27 -07003028
3029 /* Remove page from free list */
3030 list_del(&page->lru);
3031 zone->free_area[order].nr_free--;
3032 rmv_page_order(page);
Bartlomiej Zolnierkiewicz2139cbe2012-10-08 16:32:00 -07003033
zhong jiang400bc7f2016-07-28 15:45:07 -07003034 /*
3035	 * Set the pageblock's migratetype if the isolated page covers at
3036	 * least half of a pageblock.
3037 */
Mel Gorman748446b2010-05-24 14:32:27 -07003038 if (order >= pageblock_order - 1) {
3039 struct page *endpage = page + (1 << order) - 1;
Michal Nazarewicz47118af2011-12-29 13:09:50 +01003040 for (; page < endpage; page += pageblock_nr_pages) {
3041 int mt = get_pageblock_migratetype(page);
Minchan Kim88ed3652016-12-12 16:42:05 -08003042 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
Xishi Qiua6ffdc02017-05-03 14:52:52 -07003043 && !is_migrate_highatomic(mt))
Michal Nazarewicz47118af2011-12-29 13:09:50 +01003044 set_pageblock_migratetype(page,
3045 MIGRATE_MOVABLE);
3046 }
Mel Gorman748446b2010-05-24 14:32:27 -07003047 }
3048
Joonsoo Kimf3a14ce2015-07-17 16:24:15 -07003049
Mel Gorman8fb74b92013-01-11 14:32:16 -08003050 return 1UL << order;
Mel Gorman1fb3f8c2012-10-08 16:29:12 -07003051}
3052
3053/*
Mel Gorman060e7412016-05-19 17:13:27 -07003054 * Update NUMA hit/miss statistics
3055 *
3056 * Must be called with interrupts disabled.
Mel Gorman060e7412016-05-19 17:13:27 -07003057 */
Michal Hocko41b61672017-01-10 16:57:42 -08003058static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
Mel Gorman060e7412016-05-19 17:13:27 -07003059{
3060#ifdef CONFIG_NUMA
Kemi Wang3a321d22017-09-08 16:12:48 -07003061 enum numa_stat_item local_stat = NUMA_LOCAL;
Mel Gorman060e7412016-05-19 17:13:27 -07003062
Kemi Wang45180852017-11-15 17:38:22 -08003063 /* skip numa counters update if numa stats is disabled */
3064 if (!static_branch_likely(&vm_numa_stat_key))
3065 return;
3066
Pavel Tatashinc1093b72018-08-21 21:53:32 -07003067 if (zone_to_nid(z) != numa_node_id())
Mel Gorman060e7412016-05-19 17:13:27 -07003068 local_stat = NUMA_OTHER;
Mel Gorman060e7412016-05-19 17:13:27 -07003069
Pavel Tatashinc1093b72018-08-21 21:53:32 -07003070 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
Kemi Wang3a321d22017-09-08 16:12:48 -07003071 __inc_numa_state(z, NUMA_HIT);
Michal Hocko2df26632017-01-10 16:57:39 -08003072 else {
Kemi Wang3a321d22017-09-08 16:12:48 -07003073 __inc_numa_state(z, NUMA_MISS);
3074 __inc_numa_state(preferred_zone, NUMA_FOREIGN);
Mel Gorman060e7412016-05-19 17:13:27 -07003075 }
Kemi Wang3a321d22017-09-08 16:12:48 -07003076 __inc_numa_state(z, local_stat);
Mel Gorman060e7412016-05-19 17:13:27 -07003077#endif
3078}
3079
Mel Gorman066b2392017-02-24 14:56:26 -08003080/* Remove page from the per-cpu list, caller must protect the list */
3081static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
Mel Gorman6bb15452018-12-28 00:35:41 -08003082 unsigned int alloc_flags,
Mel Gorman453f85d2017-11-15 17:38:03 -08003083 struct per_cpu_pages *pcp,
Mel Gorman066b2392017-02-24 14:56:26 -08003084 struct list_head *list)
3085{
3086 struct page *page;
3087
3088 do {
3089 if (list_empty(list)) {
3090 pcp->count += rmqueue_bulk(zone, 0,
3091 pcp->batch, list,
Mel Gorman6bb15452018-12-28 00:35:41 -08003092 migratetype, alloc_flags);
Mel Gorman066b2392017-02-24 14:56:26 -08003093 if (unlikely(list_empty(list)))
3094 return NULL;
3095 }
3096
Mel Gorman453f85d2017-11-15 17:38:03 -08003097 page = list_first_entry(list, struct page, lru);
Mel Gorman066b2392017-02-24 14:56:26 -08003098 list_del(&page->lru);
3099 pcp->count--;
3100 } while (check_new_pcp(page));
3101
3102 return page;
3103}
3104
3105/* Lock and remove page from the per-cpu list */
3106static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3107 struct zone *zone, unsigned int order,
Mel Gorman6bb15452018-12-28 00:35:41 -08003108 gfp_t gfp_flags, int migratetype,
3109 unsigned int alloc_flags)
Mel Gorman066b2392017-02-24 14:56:26 -08003110{
3111 struct per_cpu_pages *pcp;
3112 struct list_head *list;
Mel Gorman066b2392017-02-24 14:56:26 -08003113 struct page *page;
Mel Gormand34b0732017-04-20 14:37:43 -07003114 unsigned long flags;
Mel Gorman066b2392017-02-24 14:56:26 -08003115
Mel Gormand34b0732017-04-20 14:37:43 -07003116 local_irq_save(flags);
Mel Gorman066b2392017-02-24 14:56:26 -08003117 pcp = &this_cpu_ptr(zone->pageset)->pcp;
3118 list = &pcp->lists[migratetype];
Mel Gorman6bb15452018-12-28 00:35:41 -08003119 page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
Mel Gorman066b2392017-02-24 14:56:26 -08003120 if (page) {
3121 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3122 zone_statistics(preferred_zone, zone);
3123 }
Mel Gormand34b0732017-04-20 14:37:43 -07003124 local_irq_restore(flags);
Mel Gorman066b2392017-02-24 14:56:26 -08003125 return page;
3126}
3127
Mel Gorman060e7412016-05-19 17:13:27 -07003128/*
Vlastimil Babka75379192015-02-11 15:25:38 -08003129 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003130 */
Mel Gorman0a15c3e2009-06-16 15:32:05 -07003131static inline
Mel Gorman066b2392017-02-24 14:56:26 -08003132struct page *rmqueue(struct zone *preferred_zone,
Mel Gorman7aeb09f2014-06-04 16:10:21 -07003133 struct zone *zone, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07003134 gfp_t gfp_flags, unsigned int alloc_flags,
3135 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003136{
3137 unsigned long flags;
Hugh Dickins689bceb2005-11-21 21:32:20 -08003138 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003139
Mel Gormand34b0732017-04-20 14:37:43 -07003140 if (likely(order == 0)) {
Mel Gorman066b2392017-02-24 14:56:26 -08003141 page = rmqueue_pcplist(preferred_zone, zone, order,
Mel Gorman6bb15452018-12-28 00:35:41 -08003142 gfp_flags, migratetype, alloc_flags);
Mel Gorman066b2392017-02-24 14:56:26 -08003143 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144 }
3145
Mel Gorman066b2392017-02-24 14:56:26 -08003146 /*
3147 * We most definitely don't want callers attempting to
3148 * allocate greater than order-1 page units with __GFP_NOFAIL.
3149 */
3150 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3151 spin_lock_irqsave(&zone->lock, flags);
3152
3153 do {
3154 page = NULL;
3155 if (alloc_flags & ALLOC_HARDER) {
3156 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3157 if (page)
3158 trace_mm_page_alloc_zone_locked(page, order, migratetype);
3159 }
3160 if (!page)
Mel Gorman6bb15452018-12-28 00:35:41 -08003161 page = __rmqueue(zone, order, migratetype, alloc_flags);
Mel Gorman066b2392017-02-24 14:56:26 -08003162 } while (page && check_new_pages(page, order));
3163 spin_unlock(&zone->lock);
3164 if (!page)
3165 goto failed;
3166 __mod_zone_freepage_state(zone, -(1 << order),
3167 get_pcppage_migratetype(page));
3168
Mel Gorman16709d12016-07-28 15:46:56 -07003169 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
Michal Hocko41b61672017-01-10 16:57:42 -08003170 zone_statistics(preferred_zone, zone);
Nick Piggina74609f2006-01-06 00:11:20 -08003171 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172
Mel Gorman066b2392017-02-24 14:56:26 -08003173out:
Mel Gorman73444bc2019-01-08 15:23:39 -08003174 /* Separate test+clear to avoid unnecessary atomics */
3175 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3176 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3177 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3178 }
3179
Mel Gorman066b2392017-02-24 14:56:26 -08003180 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003181 return page;
Nick Piggina74609f2006-01-06 00:11:20 -08003182
3183failed:
3184 local_irq_restore(flags);
Nick Piggina74609f2006-01-06 00:11:20 -08003185 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003186}
3187
Akinobu Mita933e3122006-12-08 02:39:45 -08003188#ifdef CONFIG_FAIL_PAGE_ALLOC
3189
Akinobu Mitab2588c42011-07-26 16:09:03 -07003190static struct {
Akinobu Mita933e3122006-12-08 02:39:45 -08003191 struct fault_attr attr;
3192
Viresh Kumar621a5f72015-09-26 15:04:07 -07003193 bool ignore_gfp_highmem;
Mel Gorman71baba42015-11-06 16:28:28 -08003194 bool ignore_gfp_reclaim;
Akinobu Mita54114992007-07-15 23:40:23 -07003195 u32 min_order;
Akinobu Mita933e3122006-12-08 02:39:45 -08003196} fail_page_alloc = {
3197 .attr = FAULT_ATTR_INITIALIZER,
Mel Gorman71baba42015-11-06 16:28:28 -08003198 .ignore_gfp_reclaim = true,
Viresh Kumar621a5f72015-09-26 15:04:07 -07003199 .ignore_gfp_highmem = true,
Akinobu Mita54114992007-07-15 23:40:23 -07003200 .min_order = 1,
Akinobu Mita933e3122006-12-08 02:39:45 -08003201};
3202
3203static int __init setup_fail_page_alloc(char *str)
3204{
3205 return setup_fault_attr(&fail_page_alloc.attr, str);
3206}
3207__setup("fail_page_alloc=", setup_fail_page_alloc);
3208
Benjamin Poirieraf3b8542018-12-28 00:39:23 -08003209static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
Akinobu Mita933e3122006-12-08 02:39:45 -08003210{
Akinobu Mita54114992007-07-15 23:40:23 -07003211 if (order < fail_page_alloc.min_order)
Gavin Shandeaf3862012-07-31 16:41:51 -07003212 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08003213 if (gfp_mask & __GFP_NOFAIL)
Gavin Shandeaf3862012-07-31 16:41:51 -07003214 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08003215 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
Gavin Shandeaf3862012-07-31 16:41:51 -07003216 return false;
Mel Gorman71baba42015-11-06 16:28:28 -08003217 if (fail_page_alloc.ignore_gfp_reclaim &&
3218 (gfp_mask & __GFP_DIRECT_RECLAIM))
Gavin Shandeaf3862012-07-31 16:41:51 -07003219 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08003220
3221 return should_fail(&fail_page_alloc.attr, 1 << order);
3222}
3223
3224#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3225
3226static int __init fail_page_alloc_debugfs(void)
3227{
Joe Perches0825a6f2018-06-14 15:27:58 -07003228 umode_t mode = S_IFREG | 0600;
Akinobu Mita933e3122006-12-08 02:39:45 -08003229 struct dentry *dir;
Akinobu Mita933e3122006-12-08 02:39:45 -08003230
Akinobu Mitadd48c082011-08-03 16:21:01 -07003231 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3232 &fail_page_alloc.attr);
3233 if (IS_ERR(dir))
3234 return PTR_ERR(dir);
Akinobu Mita933e3122006-12-08 02:39:45 -08003235
Akinobu Mitab2588c42011-07-26 16:09:03 -07003236 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
Mel Gorman71baba42015-11-06 16:28:28 -08003237 &fail_page_alloc.ignore_gfp_reclaim))
Akinobu Mitab2588c42011-07-26 16:09:03 -07003238 goto fail;
3239 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3240 &fail_page_alloc.ignore_gfp_highmem))
3241 goto fail;
3242 if (!debugfs_create_u32("min-order", mode, dir,
3243 &fail_page_alloc.min_order))
3244 goto fail;
Akinobu Mita933e3122006-12-08 02:39:45 -08003245
Akinobu Mitab2588c42011-07-26 16:09:03 -07003246 return 0;
3247fail:
Akinobu Mitadd48c082011-08-03 16:21:01 -07003248 debugfs_remove_recursive(dir);
Akinobu Mita933e3122006-12-08 02:39:45 -08003249
Akinobu Mitab2588c42011-07-26 16:09:03 -07003250 return -ENOMEM;
Akinobu Mita933e3122006-12-08 02:39:45 -08003251}
3252
3253late_initcall(fail_page_alloc_debugfs);
3254
3255#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3256
3257#else /* CONFIG_FAIL_PAGE_ALLOC */
3258
Benjamin Poirieraf3b8542018-12-28 00:39:23 -08003259static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
Akinobu Mita933e3122006-12-08 02:39:45 -08003260{
Gavin Shandeaf3862012-07-31 16:41:51 -07003261 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08003262}
3263
3264#endif /* CONFIG_FAIL_PAGE_ALLOC */
3265
Benjamin Poirieraf3b8542018-12-28 00:39:23 -08003266static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3267{
3268 return __should_fail_alloc_page(gfp_mask, order);
3269}
3270ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
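
/*
 * Configuration sketch: the attributes above surface as debugfs files under
 * <debugfs>/fail_page_alloc/ (ignore-gfp-wait, ignore-gfp-highmem,
 * min-order) on top of the generic fault_attr knobs, setup_fail_page_alloc()
 * wires the "fail_page_alloc=" boot parameter to the common fault_attr
 * parser, and ALLOW_ERROR_INJECTION() additionally exposes
 * should_fail_alloc_page() to the error injection framework.
 */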
3271
Linus Torvalds1da177e2005-04-16 15:20:36 -07003272/*
Mel Gorman97a16fc2015-11-06 16:28:40 -08003273 * Return true if free base pages are above 'mark'. For high-order checks it
3274 * will return true if the order-0 watermark is reached and there is at least
3275 * one free page of a suitable size. Checking now avoids taking the zone lock
3276 * to check in the allocation paths if no pages are free.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003277 */
Michal Hocko86a294a2016-05-20 16:57:12 -07003278bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3279 int classzone_idx, unsigned int alloc_flags,
3280 long free_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281{
Christoph Lameterd23ad422007-02-10 01:43:02 -08003282 long min = mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283 int o;
Michal Hockocd04ae12017-09-06 16:24:50 -07003284 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003286 /* free_pages may go negative - that's OK */
Michal Hockodf0a6da2012-01-10 15:08:02 -08003287 free_pages -= (1 << order) - 1;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003288
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003289 if (alloc_flags & ALLOC_HIGH)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290 min -= min / 2;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003291
3292 /*
3293 * If the caller does not have rights to ALLOC_HARDER then subtract
3294 * the high-atomic reserves. This will over-estimate the size of the
3295 * atomic reserve but it avoids a search.
3296 */
Michal Hockocd04ae12017-09-06 16:24:50 -07003297 if (likely(!alloc_harder)) {
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003298 free_pages -= z->nr_reserved_highatomic;
Michal Hockocd04ae12017-09-06 16:24:50 -07003299 } else {
3300 /*
3301 * OOM victims can try even harder than normal ALLOC_HARDER
3302 * users on the grounds that it's definitely going to be in
3303 * the exit path shortly and free memory. Any allocation it
3304 * makes during the free path will be small and short-lived.
3305 */
3306 if (alloc_flags & ALLOC_OOM)
3307 min -= min / 2;
3308 else
3309 min -= min / 4;
3310 }
3311
Mel Gormane2b19192015-11-06 16:28:09 -08003312
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09003313#ifdef CONFIG_CMA
3314 /* If allocation can't use CMA areas don't use free CMA pages */
3315 if (!(alloc_flags & ALLOC_CMA))
3316 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
3317#endif
3318
Mel Gorman97a16fc2015-11-06 16:28:40 -08003319 /*
3320 * Check watermarks for an order-0 allocation request. If these
3321 * are not met, then a high-order request also cannot go ahead
3322 * even if a suitable page happened to be free.
3323 */
3324 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
Mel Gorman88f5acf2011-01-13 15:45:41 -08003325 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326
Mel Gorman97a16fc2015-11-06 16:28:40 -08003327 /* If this is an order-0 request then the watermark is fine */
3328 if (!order)
3329 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330
Mel Gorman97a16fc2015-11-06 16:28:40 -08003331 /* For a high-order request, check at least one suitable page is free */
3332 for (o = order; o < MAX_ORDER; o++) {
3333 struct free_area *area = &z->free_area[o];
3334 int mt;
3335
3336 if (!area->nr_free)
3337 continue;
3338
Mel Gorman97a16fc2015-11-06 16:28:40 -08003339 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3340 if (!list_empty(&area->free_list[mt]))
3341 return true;
3342 }
3343
3344#ifdef CONFIG_CMA
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09003345 if ((alloc_flags & ALLOC_CMA) &&
3346 !list_empty(&area->free_list[MIGRATE_CMA])) {
Mel Gorman97a16fc2015-11-06 16:28:40 -08003347 return true;
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09003348 }
Mel Gorman97a16fc2015-11-06 16:28:40 -08003349#endif
Vlastimil Babkab050e372017-11-15 17:38:30 -08003350 if (alloc_harder &&
3351 !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
3352 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353 }
Mel Gorman97a16fc2015-11-06 16:28:40 -08003354 return false;
Mel Gorman88f5acf2011-01-13 15:45:41 -08003355}
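
/*
 * Worked example with hypothetical numbers: for an order-2 request against
 * a watermark of 1024 pages with ALLOC_HIGH set, free_pages is first
 * reduced by (1 << 2) - 1 = 3 (and by the highatomic reserve for
 * non-ALLOC_HARDER callers), min drops to 512, and the order-0 check then
 * requires free_pages > 512 + lowmem_reserve before the per-order
 * free_area scan is attempted at all.
 */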
3356
Mel Gorman7aeb09f2014-06-04 16:10:21 -07003357bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
Mel Gormanc6038442016-05-19 17:13:38 -07003358 int classzone_idx, unsigned int alloc_flags)
Mel Gorman88f5acf2011-01-13 15:45:41 -08003359{
3360 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3361 zone_page_state(z, NR_FREE_PAGES));
3362}
3363
Mel Gorman48ee5f32016-05-19 17:14:07 -07003364static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3365 unsigned long mark, int classzone_idx, unsigned int alloc_flags)
3366{
3367 long free_pages = zone_page_state(z, NR_FREE_PAGES);
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09003368 long cma_pages = 0;
3369
3370#ifdef CONFIG_CMA
3371 /* If allocation can't use CMA areas don't use free CMA pages */
3372 if (!(alloc_flags & ALLOC_CMA))
3373 cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
3374#endif
Mel Gorman48ee5f32016-05-19 17:14:07 -07003375
3376 /*
3377 * Fast check for order-0 only. If this fails then the reserves
3378 * need to be calculated. There is a corner case where the check
3379	 * passes but only the high-order atomic reserves are free. If
3380 * the caller is !atomic then it'll uselessly search the free
3381 * list. That corner case is then slower but it is harmless.
3382 */
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09003383 if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
Mel Gorman48ee5f32016-05-19 17:14:07 -07003384 return true;
3385
3386 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3387 free_pages);
3388}
3389
Mel Gorman7aeb09f2014-06-04 16:10:21 -07003390bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
Mel Gormane2b19192015-11-06 16:28:09 -08003391 unsigned long mark, int classzone_idx)
Mel Gorman88f5acf2011-01-13 15:45:41 -08003392{
3393 long free_pages = zone_page_state(z, NR_FREE_PAGES);
3394
3395 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3396 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3397
Mel Gormane2b19192015-11-06 16:28:09 -08003398 return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
Mel Gorman88f5acf2011-01-13 15:45:41 -08003399 free_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400}
3401
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003402#ifdef CONFIG_NUMA
David Rientjes957f8222012-10-08 16:33:24 -07003403static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3404{
Gavin Shane02dc012017-02-24 14:59:33 -08003405 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
Mel Gorman5f7a75a2014-06-04 16:07:15 -07003406 RECLAIM_DISTANCE;
David Rientjes957f8222012-10-08 16:33:24 -07003407}
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003408#else /* CONFIG_NUMA */
David Rientjes957f8222012-10-08 16:33:24 -07003409static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3410{
3411 return true;
3412}
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003413#endif /* CONFIG_NUMA */
3414
Mel Gorman6bb15452018-12-28 00:35:41 -08003415/*
3416 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3417 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3418 * premature use of a lower zone may cause lowmem pressure problems that
3419 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3420 * probably too small. It only makes sense to spread allocations to avoid
3421 * fragmentation between the Normal and DMA32 zones.
3422 */
3423static inline unsigned int
Mel Gorman0a79cda2018-12-28 00:35:48 -08003424alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
Mel Gorman6bb15452018-12-28 00:35:41 -08003425{
Mel Gorman0a79cda2018-12-28 00:35:48 -08003426 unsigned int alloc_flags = 0;
3427
3428 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3429 alloc_flags |= ALLOC_KSWAPD;
3430
3431#ifdef CONFIG_ZONE_DMA32
Mel Gorman6bb15452018-12-28 00:35:41 -08003432 if (zone_idx(zone) != ZONE_NORMAL)
Mel Gorman0a79cda2018-12-28 00:35:48 -08003433 goto out;
Mel Gorman6bb15452018-12-28 00:35:41 -08003434
3435 /*
3436 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3437 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3438 * on UMA that if Normal is populated then so is DMA32.
3439 */
3440 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3441 if (nr_online_nodes > 1 && !populated_zone(--zone))
Mel Gorman0a79cda2018-12-28 00:35:48 -08003442 goto out;
Mel Gorman6bb15452018-12-28 00:35:41 -08003443
	alloc_flags |= ALLOC_NOFRAGMENT;
Mel Gorman0a79cda2018-12-28 00:35:48 -08003444out:
3445#endif /* CONFIG_ZONE_DMA32 */
3446 return alloc_flags;
Mel Gorman6bb15452018-12-28 00:35:41 -08003447}
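
/*
 * Note: get_page_from_freelist() below clears ALLOC_NOFRAGMENT again when
 * it has to fall back to a remote node, and retries without it once every
 * local zone has turned out to be fragmented, so the flag expresses a
 * preference rather than a hard constraint.
 */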
Mel Gorman6bb15452018-12-28 00:35:41 -08003448
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003449/*
Paul Jackson0798e512006-12-06 20:31:38 -08003450 * get_page_from_freelist goes through the zonelist trying to allocate
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003451 * a page.
3452 */
3453static struct page *
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003454get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3455 const struct alloc_context *ac)
Martin Hicks753ee722005-06-21 17:14:41 -07003456{
Mel Gorman6bb15452018-12-28 00:35:41 -08003457 struct zoneref *z;
Mel Gorman5117f452009-06-16 15:31:59 -07003458 struct zone *zone;
Mel Gorman3b8c0be2016-07-28 15:46:53 -07003459 struct pglist_data *last_pgdat_dirty_limit = NULL;
Mel Gorman6bb15452018-12-28 00:35:41 -08003460 bool no_fallback;
Mel Gorman3b8c0be2016-07-28 15:46:53 -07003461
Mel Gorman6bb15452018-12-28 00:35:41 -08003462retry:
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003463 /*
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003464 * Scan zonelist, looking for a zone with enough free.
Vladimir Davydov344736f2014-10-20 15:50:30 +04003465 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003466 */
Mel Gorman6bb15452018-12-28 00:35:41 -08003467 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3468 z = ac->preferred_zoneref;
Mel Gormanc33d6c02016-05-19 17:14:10 -07003469 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003470 ac->nodemask) {
Mel Gormanbe06af02016-05-19 17:13:47 -07003471 struct page *page;
Johannes Weinere085dbc2013-09-11 14:20:46 -07003472 unsigned long mark;
3473
Mel Gorman664eedd2014-06-04 16:10:08 -07003474 if (cpusets_enabled() &&
3475 (alloc_flags & ALLOC_CPUSET) &&
Vlastimil Babka002f2902016-05-19 17:14:30 -07003476 !__cpuset_zone_allowed(zone, gfp_mask))
Mel Gormancd38b112011-07-25 17:12:29 -07003477 continue;
Johannes Weinera756cf52012-01-10 15:07:49 -08003478 /*
3479 * When allocating a page cache page for writing, we
Mel Gorman281e3722016-07-28 15:46:11 -07003480 * want to get it from a node that is within its dirty
3481 * limit, such that no single node holds more than its
Johannes Weinera756cf52012-01-10 15:07:49 -08003482 * proportional share of globally allowed dirty pages.
Mel Gorman281e3722016-07-28 15:46:11 -07003483 * The dirty limits take into account the node's
Johannes Weinera756cf52012-01-10 15:07:49 -08003484 * lowmem reserves and high watermark so that kswapd
3485 * should be able to balance it without having to
3486 * write pages from its LRU list.
3487 *
Johannes Weinera756cf52012-01-10 15:07:49 -08003488 * XXX: For now, allow allocations to potentially
Mel Gorman281e3722016-07-28 15:46:11 -07003489 * exceed the per-node dirty limit in the slowpath
Mel Gormanc9ab0c42015-11-06 16:28:12 -08003490 * (spread_dirty_pages unset) before going into reclaim,
Johannes Weinera756cf52012-01-10 15:07:49 -08003491 * which is important when on a NUMA setup the allowed
Mel Gorman281e3722016-07-28 15:46:11 -07003492 * nodes are together not big enough to reach the
Johannes Weinera756cf52012-01-10 15:07:49 -08003493 * global limit. The proper fix for these situations
Mel Gorman281e3722016-07-28 15:46:11 -07003494 * will require awareness of nodes in the
Johannes Weinera756cf52012-01-10 15:07:49 -08003495 * dirty-throttling and the flusher threads.
3496 */
Mel Gorman3b8c0be2016-07-28 15:46:53 -07003497 if (ac->spread_dirty_pages) {
3498 if (last_pgdat_dirty_limit == zone->zone_pgdat)
3499 continue;
3500
3501 if (!node_dirty_ok(zone->zone_pgdat)) {
3502 last_pgdat_dirty_limit = zone->zone_pgdat;
3503 continue;
3504 }
3505 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003506
Mel Gorman6bb15452018-12-28 00:35:41 -08003507 if (no_fallback && nr_online_nodes > 1 &&
3508 zone != ac->preferred_zoneref->zone) {
3509 int local_nid;
3510
3511 /*
3512 * If moving to a remote node, retry but allow
3513 * fragmenting fallbacks. Locality is more important
3514 * than fragmentation avoidance.
3515 */
3516 local_nid = zone_to_nid(ac->preferred_zoneref->zone);
3517 if (zone_to_nid(zone) != local_nid) {
3518 alloc_flags &= ~ALLOC_NOFRAGMENT;
3519 goto retry;
3520 }
3521 }
3522
Mel Gormana9214442018-12-28 00:35:44 -08003523 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
Mel Gorman48ee5f32016-05-19 17:14:07 -07003524 if (!zone_watermark_fast(zone, order, mark,
Mel Gorman93ea9962016-05-19 17:14:13 -07003525 ac_classzone_idx(ac), alloc_flags)) {
Mel Gormanfa5e0842009-06-16 15:33:22 -07003526 int ret;
3527
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07003528#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3529 /*
3530 * Watermark failed for this zone, but see if we can
3531 * grow this zone if it contains deferred pages.
3532 */
3533 if (static_branch_unlikely(&deferred_pages)) {
3534 if (_deferred_grow_zone(zone, order))
3535 goto try_this_zone;
3536 }
3537#endif
Mel Gorman5dab2912014-06-04 16:10:14 -07003538 /* Checked here to keep the fast path fast */
3539 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3540 if (alloc_flags & ALLOC_NO_WATERMARKS)
3541 goto try_this_zone;
3542
Mel Gormana5f5f912016-07-28 15:46:32 -07003543 if (node_reclaim_mode == 0 ||
Mel Gormanc33d6c02016-05-19 17:14:10 -07003544 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
Mel Gormancd38b112011-07-25 17:12:29 -07003545 continue;
3546
Mel Gormana5f5f912016-07-28 15:46:32 -07003547 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
Mel Gormanfa5e0842009-06-16 15:33:22 -07003548 switch (ret) {
Mel Gormana5f5f912016-07-28 15:46:32 -07003549 case NODE_RECLAIM_NOSCAN:
Mel Gormanfa5e0842009-06-16 15:33:22 -07003550 /* did not scan */
Mel Gormancd38b112011-07-25 17:12:29 -07003551 continue;
Mel Gormana5f5f912016-07-28 15:46:32 -07003552 case NODE_RECLAIM_FULL:
Mel Gormanfa5e0842009-06-16 15:33:22 -07003553 /* scanned but unreclaimable */
Mel Gormancd38b112011-07-25 17:12:29 -07003554 continue;
Mel Gormanfa5e0842009-06-16 15:33:22 -07003555 default:
3556 /* did we reclaim enough */
Mel Gormanfed27192013-04-29 15:07:57 -07003557 if (zone_watermark_ok(zone, order, mark,
Mel Gorman93ea9962016-05-19 17:14:13 -07003558 ac_classzone_idx(ac), alloc_flags))
Mel Gormanfed27192013-04-29 15:07:57 -07003559 goto try_this_zone;
3560
Mel Gormanfed27192013-04-29 15:07:57 -07003561 continue;
Paul Jackson0798e512006-12-06 20:31:38 -08003562 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003563 }
3564
Mel Gormanfa5e0842009-06-16 15:33:22 -07003565try_this_zone:
Mel Gorman066b2392017-02-24 14:56:26 -08003566 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003567 gfp_mask, alloc_flags, ac->migratetype);
Vlastimil Babka75379192015-02-11 15:25:38 -08003568 if (page) {
Mel Gorman479f8542016-05-19 17:14:35 -07003569 prep_new_page(page, order, gfp_mask, alloc_flags);
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003570
3571 /*
3572 * If this is a high-order atomic allocation then check
3573 * if the pageblock should be reserved for the future
3574 */
3575 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3576 reserve_highatomic_pageblock(page, zone, order);
3577
Vlastimil Babka75379192015-02-11 15:25:38 -08003578 return page;
Pavel Tatashinc9e97a12018-04-05 16:22:31 -07003579 } else {
3580#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3581 /* Try again if zone has deferred pages */
3582 if (static_branch_unlikely(&deferred_pages)) {
3583 if (_deferred_grow_zone(zone, order))
3584 goto try_this_zone;
3585 }
3586#endif
Vlastimil Babka75379192015-02-11 15:25:38 -08003587 }
Mel Gorman54a6eb52008-04-28 02:12:16 -07003588 }
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003589
Mel Gorman6bb15452018-12-28 00:35:41 -08003590 /*
3591 * It's possible on a UMA machine to get through all zones that are
3592 * fragmented. If avoiding fragmentation, reset and try again.
3593 */
3594 if (no_fallback) {
3595 alloc_flags &= ~ALLOC_NOFRAGMENT;
3596 goto retry;
3597 }
3598
Mel Gorman4ffeaf32014-08-06 16:07:22 -07003599 return NULL;
Martin Hicks753ee722005-06-21 17:14:41 -07003600}
3601
Michal Hocko9af744d2017-02-22 15:46:16 -08003602static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
Dave Hansena238ab52011-05-24 17:12:16 -07003603{
Dave Hansena238ab52011-05-24 17:12:16 -07003604 unsigned int filter = SHOW_MEM_FILTER_NODES;
Michal Hockoaa187502017-02-22 15:41:45 -08003605 static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
Dave Hansena238ab52011-05-24 17:12:16 -07003606
Michal Hocko2c029a12018-10-26 15:06:49 -07003607 if (!__ratelimit(&show_mem_rs))
Dave Hansena238ab52011-05-24 17:12:16 -07003608 return;
3609
3610 /*
3611 * This documents exceptions given to allocations in certain
3612 * contexts that are allowed to allocate outside current's set
3613 * of allowed nodes.
3614 */
3615 if (!(gfp_mask & __GFP_NOMEMALLOC))
Michal Hockocd04ae12017-09-06 16:24:50 -07003616 if (tsk_is_oom_victim(current) ||
Dave Hansena238ab52011-05-24 17:12:16 -07003617 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3618 filter &= ~SHOW_MEM_FILTER_NODES;
Mel Gormand0164ad2015-11-06 16:28:21 -08003619 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
Dave Hansena238ab52011-05-24 17:12:16 -07003620 filter &= ~SHOW_MEM_FILTER_NODES;
3621
Michal Hocko9af744d2017-02-22 15:46:16 -08003622 show_mem(filter, nodemask);
Michal Hockoaa187502017-02-22 15:41:45 -08003623}
3624
Michal Hockoa8e99252017-02-22 15:46:10 -08003625void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
Michal Hockoaa187502017-02-22 15:41:45 -08003626{
3627 struct va_format vaf;
3628 va_list args;
3629 static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
3630 DEFAULT_RATELIMIT_BURST);
3631
Tetsuo Handa0f7896f2017-05-03 14:55:34 -07003632 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
Michal Hockoaa187502017-02-22 15:41:45 -08003633 return;
3634
Michal Hocko7877cdc2016-10-07 17:01:55 -07003635 va_start(args, fmt);
3636 vaf.fmt = fmt;
3637 vaf.va = &args;
yuzhoujianef8444e2018-12-28 00:36:07 -08003638 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
Michal Hocko0205f752017-11-15 17:39:14 -08003639 current->comm, &vaf, gfp_mask, &gfp_mask,
3640 nodemask_pr_args(nodemask));
Michal Hocko7877cdc2016-10-07 17:01:55 -07003641 va_end(args);
Joe Perches3ee9a4f2011-10-31 17:08:35 -07003642
Michal Hockoa8e99252017-02-22 15:46:10 -08003643 cpuset_print_current_mems_allowed();
yuzhoujianef8444e2018-12-28 00:36:07 -08003644 pr_cont("\n");
Dave Hansena238ab52011-05-24 17:12:16 -07003645 dump_stack();
David Rientjes685dbf62017-02-22 15:46:28 -08003646 warn_alloc_show_mem(gfp_mask, nodemask);
Dave Hansena238ab52011-05-24 17:12:16 -07003647}
3648
Mel Gorman11e33f62009-06-16 15:31:57 -07003649static inline struct page *
Michal Hocko6c18ba72017-02-22 15:46:25 -08003650__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3651 unsigned int alloc_flags,
3652 const struct alloc_context *ac)
3653{
3654 struct page *page;
3655
3656 page = get_page_from_freelist(gfp_mask, order,
3657 alloc_flags|ALLOC_CPUSET, ac);
3658 /*
3659 * fallback to ignore cpuset restriction if our nodes
3660 * are depleted
3661 */
3662 if (!page)
3663 page = get_page_from_freelist(gfp_mask, order,
3664 alloc_flags, ac);
3665
3666 return page;
3667}
3668
3669static inline struct page *
Mel Gorman11e33f62009-06-16 15:31:57 -07003670__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003671 const struct alloc_context *ac, unsigned long *did_some_progress)
Mel Gorman11e33f62009-06-16 15:31:57 -07003672{
David Rientjes6e0fc462015-09-08 15:00:36 -07003673 struct oom_control oc = {
3674 .zonelist = ac->zonelist,
3675 .nodemask = ac->nodemask,
Vladimir Davydov2a966b72016-07-26 15:22:33 -07003676 .memcg = NULL,
David Rientjes6e0fc462015-09-08 15:00:36 -07003677 .gfp_mask = gfp_mask,
3678 .order = order,
David Rientjes6e0fc462015-09-08 15:00:36 -07003679 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003681
Johannes Weiner9879de72015-01-26 12:58:32 -08003682 *did_some_progress = 0;
3683
Johannes Weiner9879de72015-01-26 12:58:32 -08003684 /*
Johannes Weinerdc564012015-06-24 16:57:19 -07003685 * Acquire the oom lock. If that fails, somebody else is
3686 * making progress for us.
Johannes Weiner9879de72015-01-26 12:58:32 -08003687 */
Johannes Weinerdc564012015-06-24 16:57:19 -07003688 if (!mutex_trylock(&oom_lock)) {
Johannes Weiner9879de72015-01-26 12:58:32 -08003689 *did_some_progress = 1;
Mel Gorman11e33f62009-06-16 15:31:57 -07003690 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003691 return NULL;
3692 }
Jens Axboe6b1de912005-11-17 21:35:02 +01003693
Mel Gorman11e33f62009-06-16 15:31:57 -07003694 /*
3695	 * Go through the zonelist yet one more time, keeping a very high watermark
3696	 * here; this is only to catch a parallel oom killing, and we must fail if
Tetsuo Handae746bf72017-08-31 16:15:20 -07003697 * we're still under heavy pressure. But make sure that this reclaim
3698 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
3699	 * allocation, which will never fail because oom_lock is already held.
Mel Gorman11e33f62009-06-16 15:31:57 -07003700 */
Tetsuo Handae746bf72017-08-31 16:15:20 -07003701 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3702 ~__GFP_DIRECT_RECLAIM, order,
3703 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003704 if (page)
Mel Gorman11e33f62009-06-16 15:31:57 -07003705 goto out;
3706
Michal Hocko06ad2762017-02-22 15:46:22 -08003707 /* Coredumps can quickly deplete all memory reserves */
3708 if (current->flags & PF_DUMPCORE)
3709 goto out;
3710 /* The OOM killer will not help higher order allocs */
3711 if (order > PAGE_ALLOC_COSTLY_ORDER)
3712 goto out;
Michal Hockodcda9b02017-07-12 14:36:45 -07003713 /*
3714 * We have already exhausted all our reclaim opportunities without any
3715 * success so it is time to admit defeat. We will skip the OOM killer
3716 * because it is very likely that the caller has a more reasonable
3717 * fallback than shooting a random task.
3718 */
3719 if (gfp_mask & __GFP_RETRY_MAYFAIL)
3720 goto out;
Michal Hocko06ad2762017-02-22 15:46:22 -08003721 /* The OOM killer does not needlessly kill tasks for lowmem */
3722 if (ac->high_zoneidx < ZONE_NORMAL)
3723 goto out;
3724 if (pm_suspended_storage())
3725 goto out;
3726 /*
3727 * XXX: GFP_NOFS allocations should rather fail than rely on
3728	 * other requests to make forward progress.
3729 * We are in an unfortunate situation where out_of_memory cannot
3730 * do much for this context but let's try it to at least get
3731 * access to memory reserved if the current task is killed (see
3732 * out_of_memory). Once filesystems are ready to handle allocation
3733 * failures more gracefully we should just bail out here.
3734 */
Michal Hocko3da88fb32016-05-19 17:13:09 -07003735
Michal Hocko06ad2762017-02-22 15:46:22 -08003736 /* The OOM killer may not free memory on a specific node */
3737 if (gfp_mask & __GFP_THISNODE)
3738 goto out;
3739
Shile Zhang3c2c6482018-01-31 16:20:07 -08003740 /* Exhausted what can be done so it's blame time */
Michal Hocko5020e282016-01-14 15:20:36 -08003741 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
Michal Hockoc32b3cb2015-02-11 15:26:24 -08003742 *did_some_progress = 1;
Michal Hocko5020e282016-01-14 15:20:36 -08003743
Michal Hocko6c18ba72017-02-22 15:46:25 -08003744 /*
3745 * Help non-failing allocations by giving them access to memory
3746 * reserves
3747 */
3748 if (gfp_mask & __GFP_NOFAIL)
3749 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
Michal Hocko5020e282016-01-14 15:20:36 -08003750 ALLOC_NO_WATERMARKS, ac);
Michal Hocko5020e282016-01-14 15:20:36 -08003751 }
Mel Gorman11e33f62009-06-16 15:31:57 -07003752out:
Johannes Weinerdc564012015-06-24 16:57:19 -07003753 mutex_unlock(&oom_lock);
Mel Gorman11e33f62009-06-16 15:31:57 -07003754 return page;
3755}
3756
Michal Hocko33c2d212016-05-20 16:57:06 -07003757/*
3758 * Maximum number of compaction retries with progress before the OOM
3759 * killer is considered the only way to move forward.
3760 */
3761#define MAX_COMPACT_RETRIES 16
3762
Mel Gorman56de7262010-05-24 14:32:30 -07003763#ifdef CONFIG_COMPACTION
3764/* Try memory compaction for high-order allocations before reclaim */
3765static struct page *
3766__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07003767 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003768 enum compact_priority prio, enum compact_result *compact_result)
Mel Gorman56de7262010-05-24 14:32:30 -07003769{
Mel Gorman5e1f0f02019-03-05 15:45:41 -08003770 struct page *page = NULL;
Johannes Weinereb414682018-10-26 15:06:27 -07003771 unsigned long pflags;
Vlastimil Babka499118e2017-05-08 15:59:50 -07003772 unsigned int noreclaim_flag;
Vlastimil Babka53853e22014-10-09 15:27:02 -07003773
Mel Gorman66199712012-01-12 17:19:41 -08003774 if (!order)
Mel Gorman56de7262010-05-24 14:32:30 -07003775 return NULL;
3776
Johannes Weinereb414682018-10-26 15:06:27 -07003777 psi_memstall_enter(&pflags);
Vlastimil Babka499118e2017-05-08 15:59:50 -07003778 noreclaim_flag = memalloc_noreclaim_save();
Johannes Weinereb414682018-10-26 15:06:27 -07003779
Michal Hockoc5d01d02016-05-20 16:56:53 -07003780 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
Mel Gorman5e1f0f02019-03-05 15:45:41 -08003781 prio, &page);
Johannes Weinereb414682018-10-26 15:06:27 -07003782
Vlastimil Babka499118e2017-05-08 15:59:50 -07003783 memalloc_noreclaim_restore(noreclaim_flag);
Johannes Weinereb414682018-10-26 15:06:27 -07003784 psi_memstall_leave(&pflags);
Mel Gorman56de7262010-05-24 14:32:30 -07003785
Mel Gorman5e1f0f02019-03-05 15:45:41 -08003786 if (*compact_result <= COMPACT_INACTIVE) {
3787 WARN_ON_ONCE(page);
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003788 return NULL;
Mel Gorman5e1f0f02019-03-05 15:45:41 -08003789 }
Mel Gorman56de7262010-05-24 14:32:30 -07003790
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003791 /*
3792 * At least in one zone compaction wasn't deferred or skipped, so let's
3793 * count a compaction stall
3794 */
3795 count_vm_event(COMPACTSTALL);
3796
Mel Gorman5e1f0f02019-03-05 15:45:41 -08003797 /* Prep a captured page if available */
3798 if (page)
3799 prep_new_page(page, order, gfp_mask, alloc_flags);
3800
3801	/* Try to get a page from the freelist if available */
3802 if (!page)
3803 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003804
3805 if (page) {
3806 struct zone *zone = page_zone(page);
3807
3808 zone->compact_blockskip_flush = false;
3809 compaction_defer_reset(zone, order, true);
3810 count_vm_event(COMPACTSUCCESS);
3811 return page;
3812 }
3813
3814 /*
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003815	 * It's bad if a compaction run occurs and fails. The most likely reason
3816 * is that pages exist, but not enough to satisfy watermarks.
3817 */
3818 count_vm_event(COMPACTFAIL);
3819
3820 cond_resched();
3821
Mel Gorman56de7262010-05-24 14:32:30 -07003822 return NULL;
3823}
Michal Hocko33c2d212016-05-20 16:57:06 -07003824
Vlastimil Babka32508452016-10-07 17:00:28 -07003825static inline bool
3826should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3827 enum compact_result compact_result,
3828 enum compact_priority *compact_priority,
Vlastimil Babkad9436492016-10-07 17:00:31 -07003829 int *compaction_retries)
Vlastimil Babka32508452016-10-07 17:00:28 -07003830{
3831 int max_retries = MAX_COMPACT_RETRIES;
Vlastimil Babkac2033b02016-10-07 17:00:34 -07003832 int min_priority;
Michal Hocko65190cf2017-02-22 15:42:03 -08003833 bool ret = false;
3834 int retries = *compaction_retries;
3835 enum compact_priority priority = *compact_priority;
Vlastimil Babka32508452016-10-07 17:00:28 -07003836
3837 if (!order)
3838 return false;
3839
Vlastimil Babkad9436492016-10-07 17:00:31 -07003840 if (compaction_made_progress(compact_result))
3841 (*compaction_retries)++;
3842
Vlastimil Babka32508452016-10-07 17:00:28 -07003843 /*
3844	 * compaction considers all the zones as desperately out of memory
3845 * so it doesn't really make much sense to retry except when the
3846 * failure could be caused by insufficient priority
3847 */
Vlastimil Babkad9436492016-10-07 17:00:31 -07003848 if (compaction_failed(compact_result))
3849 goto check_priority;
Vlastimil Babka32508452016-10-07 17:00:28 -07003850
3851 /*
3852 * make sure the compaction wasn't deferred or didn't bail out early
3853	 * due to lock contention before we declare that we should give up.
3854 * But do not retry if the given zonelist is not suitable for
3855 * compaction.
3856 */
Michal Hocko65190cf2017-02-22 15:42:03 -08003857 if (compaction_withdrawn(compact_result)) {
3858 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3859 goto out;
3860 }
Vlastimil Babka32508452016-10-07 17:00:28 -07003861
3862 /*
Michal Hockodcda9b02017-07-12 14:36:45 -07003863 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
Vlastimil Babka32508452016-10-07 17:00:28 -07003864 * costly ones because they are de facto nofail and invoke OOM
3865	 * killer to move on while costly ones can fail and users are ready
3866 * to cope with that. 1/4 retries is rather arbitrary but we
3867 * would need much more detailed feedback from compaction to
3868 * make a better decision.
3869 */
3870 if (order > PAGE_ALLOC_COSTLY_ORDER)
3871 max_retries /= 4;
Michal Hocko65190cf2017-02-22 15:42:03 -08003872 if (*compaction_retries <= max_retries) {
3873 ret = true;
3874 goto out;
3875 }
Vlastimil Babka32508452016-10-07 17:00:28 -07003876
Vlastimil Babkad9436492016-10-07 17:00:31 -07003877 /*
3878 * Make sure there are attempts at the highest priority if we exhausted
3879 * all retries or failed at the lower priorities.
3880 */
3881check_priority:
Vlastimil Babkac2033b02016-10-07 17:00:34 -07003882 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3883 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
Michal Hocko65190cf2017-02-22 15:42:03 -08003884
Vlastimil Babkac2033b02016-10-07 17:00:34 -07003885 if (*compact_priority > min_priority) {
Vlastimil Babkad9436492016-10-07 17:00:31 -07003886 (*compact_priority)--;
3887 *compaction_retries = 0;
Michal Hocko65190cf2017-02-22 15:42:03 -08003888 ret = true;
Vlastimil Babkad9436492016-10-07 17:00:31 -07003889 }
Michal Hocko65190cf2017-02-22 15:42:03 -08003890out:
3891 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3892 return ret;
Vlastimil Babka32508452016-10-07 17:00:28 -07003893}
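
/*
 * Worked example (illustrative sketch, not a statement from the code above):
 * for a costly order (> PAGE_ALLOC_COSTLY_ORDER) the retry budget works out
 * to roughly MAX_COMPACT_RETRIES / 4 == 4 attempts per compaction priority.
 * Once those are exhausted the priority is raised (its numeric value is
 * decremented) and the counter restarts, until MIN_COMPACT_COSTLY_PRIORITY
 * is reached and should_compact_retry() finally returns false.
 */
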
Mel Gorman56de7262010-05-24 14:32:30 -07003894#else
3895static inline struct page *
3896__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07003897 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003898 enum compact_priority prio, enum compact_result *compact_result)
Mel Gorman56de7262010-05-24 14:32:30 -07003899{
Michal Hocko33c2d212016-05-20 16:57:06 -07003900 *compact_result = COMPACT_SKIPPED;
Mel Gorman56de7262010-05-24 14:32:30 -07003901 return NULL;
3902}
Michal Hocko33c2d212016-05-20 16:57:06 -07003903
3904static inline bool
Michal Hocko86a294a2016-05-20 16:57:12 -07003905should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3906 enum compact_result compact_result,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003907 enum compact_priority *compact_priority,
Vlastimil Babkad9436492016-10-07 17:00:31 -07003908 int *compaction_retries)
Michal Hocko33c2d212016-05-20 16:57:06 -07003909{
Michal Hocko31e49bf2016-05-20 16:57:15 -07003910 struct zone *zone;
3911 struct zoneref *z;
3912
3913 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3914 return false;
3915
3916 /*
3917 * There are setups with compaction disabled which would prefer to loop
3918 * inside the allocator rather than hit the oom killer prematurely.
3919	 * Let's give them some hope and keep retrying while the order-0
3920 * watermarks are OK.
3921 */
3922 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3923 ac->nodemask) {
3924 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3925 ac_classzone_idx(ac), alloc_flags))
3926 return true;
3927 }
Michal Hocko33c2d212016-05-20 16:57:06 -07003928 return false;
3929}
Vlastimil Babka32508452016-10-07 17:00:28 -07003930#endif /* CONFIG_COMPACTION */
Mel Gorman56de7262010-05-24 14:32:30 -07003931
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003932#ifdef CONFIG_LOCKDEP
Omar Sandoval93781322018-06-07 17:07:02 -07003933static struct lockdep_map __fs_reclaim_map =
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003934 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
3935
3936static bool __need_fs_reclaim(gfp_t gfp_mask)
3937{
3938 gfp_mask = current_gfp_context(gfp_mask);
3939
3940 /* no reclaim without waiting on it */
3941 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
3942 return false;
3943
3944 /* this guy won't enter reclaim */
Tetsuo Handa2e517d682018-03-22 16:17:10 -07003945 if (current->flags & PF_MEMALLOC)
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003946 return false;
3947
3948	/* We're only interested in __GFP_FS allocations for now */
3949 if (!(gfp_mask & __GFP_FS))
3950 return false;
3951
3952 if (gfp_mask & __GFP_NOLOCKDEP)
3953 return false;
3954
3955 return true;
3956}
3957
Omar Sandoval93781322018-06-07 17:07:02 -07003958void __fs_reclaim_acquire(void)
3959{
3960 lock_map_acquire(&__fs_reclaim_map);
3961}
3962
3963void __fs_reclaim_release(void)
3964{
3965 lock_map_release(&__fs_reclaim_map);
3966}
3967
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003968void fs_reclaim_acquire(gfp_t gfp_mask)
3969{
3970 if (__need_fs_reclaim(gfp_mask))
Omar Sandoval93781322018-06-07 17:07:02 -07003971 __fs_reclaim_acquire();
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003972}
3973EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
3974
3975void fs_reclaim_release(gfp_t gfp_mask)
3976{
3977 if (__need_fs_reclaim(gfp_mask))
Omar Sandoval93781322018-06-07 17:07:02 -07003978 __fs_reclaim_release();
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003979}
3980EXPORT_SYMBOL_GPL(fs_reclaim_release);
3981#endif
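
/*
 * Usage sketch (illustrative): the annotations above are taken around any
 * code that may enter __GFP_FS reclaim, so lockdep can prove that locks held
 * there are never also taken in contexts that allocate with GFP_KERNEL.
 * A direct reclaimer, for instance, brackets the actual reclaim work:
 *
 *	fs_reclaim_acquire(gfp_mask);
 *	...perform (or, as in prepare_alloc_pages(), merely simulate) reclaim...
 *	fs_reclaim_release(gfp_mask);
 */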
3982
Marek Szyprowskibba90712012-01-25 12:09:52 +01003983/* Perform direct synchronous page reclaim */
3984static int
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003985__perform_reclaim(gfp_t gfp_mask, unsigned int order,
3986 const struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07003987{
Mel Gorman11e33f62009-06-16 15:31:57 -07003988 struct reclaim_state reclaim_state;
Marek Szyprowskibba90712012-01-25 12:09:52 +01003989 int progress;
Vlastimil Babka499118e2017-05-08 15:59:50 -07003990 unsigned int noreclaim_flag;
Johannes Weinereb414682018-10-26 15:06:27 -07003991 unsigned long pflags;
Mel Gorman11e33f62009-06-16 15:31:57 -07003992
3993 cond_resched();
3994
3995 /* We now go into synchronous reclaim */
3996 cpuset_memory_pressure_bump();
Johannes Weinereb414682018-10-26 15:06:27 -07003997 psi_memstall_enter(&pflags);
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003998 fs_reclaim_acquire(gfp_mask);
Omar Sandoval93781322018-06-07 17:07:02 -07003999 noreclaim_flag = memalloc_noreclaim_save();
Mel Gorman11e33f62009-06-16 15:31:57 -07004000 reclaim_state.reclaimed_slab = 0;
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08004001 current->reclaim_state = &reclaim_state;
Mel Gorman11e33f62009-06-16 15:31:57 -07004002
Vlastimil Babkaa9263752015-02-11 15:25:41 -08004003 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4004 ac->nodemask);
Mel Gorman11e33f62009-06-16 15:31:57 -07004005
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08004006 current->reclaim_state = NULL;
Vlastimil Babka499118e2017-05-08 15:59:50 -07004007 memalloc_noreclaim_restore(noreclaim_flag);
Omar Sandoval93781322018-06-07 17:07:02 -07004008 fs_reclaim_release(gfp_mask);
Johannes Weinereb414682018-10-26 15:06:27 -07004009 psi_memstall_leave(&pflags);
Mel Gorman11e33f62009-06-16 15:31:57 -07004010
4011 cond_resched();
4012
Marek Szyprowskibba90712012-01-25 12:09:52 +01004013 return progress;
4014}
4015
4016/* The really slow allocator path where we enter direct reclaim */
4017static inline struct page *
4018__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07004019 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08004020 unsigned long *did_some_progress)
Marek Szyprowskibba90712012-01-25 12:09:52 +01004021{
4022 struct page *page = NULL;
4023 bool drained = false;
4024
Vlastimil Babkaa9263752015-02-11 15:25:41 -08004025 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
Mel Gorman9ee493c2010-09-09 16:38:18 -07004026 if (unlikely(!(*did_some_progress)))
4027 return NULL;
Mel Gorman11e33f62009-06-16 15:31:57 -07004028
Mel Gorman9ee493c2010-09-09 16:38:18 -07004029retry:
Vlastimil Babka31a6c192016-07-28 15:49:13 -07004030 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
Mel Gorman9ee493c2010-09-09 16:38:18 -07004031
4032 /*
4033 * If an allocation failed after direct reclaim, it could be because
Mel Gorman0aaa29a2015-11-06 16:28:37 -08004034 * pages are pinned on the per-cpu lists or in high alloc reserves.
4035	 * Shrink them and try again
Mel Gorman9ee493c2010-09-09 16:38:18 -07004036 */
4037 if (!page && !drained) {
Minchan Kim29fac032016-12-12 16:42:14 -08004038 unreserve_highatomic_pageblock(ac, false);
Vlastimil Babka93481ff2014-12-10 15:43:01 -08004039 drain_all_pages(NULL);
Mel Gorman9ee493c2010-09-09 16:38:18 -07004040 drained = true;
4041 goto retry;
4042 }
4043
Mel Gorman11e33f62009-06-16 15:31:57 -07004044 return page;
4045}
4046
David Rientjes5ecd9d42018-04-05 16:25:16 -07004047static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4048 const struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07004049{
4050 struct zoneref *z;
4051 struct zone *zone;
Mel Gormane1a55632016-07-28 15:46:26 -07004052 pg_data_t *last_pgdat = NULL;
David Rientjes5ecd9d42018-04-05 16:25:16 -07004053 enum zone_type high_zoneidx = ac->high_zoneidx;
Mel Gorman11e33f62009-06-16 15:31:57 -07004054
David Rientjes5ecd9d42018-04-05 16:25:16 -07004055 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, high_zoneidx,
4056 ac->nodemask) {
Mel Gormane1a55632016-07-28 15:46:26 -07004057 if (last_pgdat != zone->zone_pgdat)
David Rientjes5ecd9d42018-04-05 16:25:16 -07004058 wakeup_kswapd(zone, gfp_mask, order, high_zoneidx);
Mel Gormane1a55632016-07-28 15:46:26 -07004059 last_pgdat = zone->zone_pgdat;
4060 }
Mel Gorman11e33f62009-06-16 15:31:57 -07004061}
4062
Mel Gormanc6038442016-05-19 17:13:38 -07004063static inline unsigned int
Peter Zijlstra341ce062009-06-16 15:32:02 -07004064gfp_to_alloc_flags(gfp_t gfp_mask)
4065{
Mel Gormanc6038442016-05-19 17:13:38 -07004066 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
Peter Zijlstra341ce062009-06-16 15:32:02 -07004067
Mel Gormana56f57f2009-06-16 15:32:02 -07004068 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
Namhyung Kime6223a32010-10-26 14:21:59 -07004069 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
Mel Gormana56f57f2009-06-16 15:32:02 -07004070
Peter Zijlstra341ce062009-06-16 15:32:02 -07004071 /*
4072 * The caller may dip into page reserves a bit more if the caller
4073 * cannot run direct reclaim, or if the caller has realtime scheduling
4074 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
Mel Gormand0164ad2015-11-06 16:28:21 -08004075 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
Peter Zijlstra341ce062009-06-16 15:32:02 -07004076 */
Namhyung Kime6223a32010-10-26 14:21:59 -07004077 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
Peter Zijlstra341ce062009-06-16 15:32:02 -07004078
Mel Gormand0164ad2015-11-06 16:28:21 -08004079 if (gfp_mask & __GFP_ATOMIC) {
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08004080 /*
David Rientjesb104a352014-07-30 16:08:24 -07004081 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4082 * if it can't schedule.
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08004083 */
David Rientjesb104a352014-07-30 16:08:24 -07004084 if (!(gfp_mask & __GFP_NOMEMALLOC))
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08004085 alloc_flags |= ALLOC_HARDER;
Peter Zijlstra341ce062009-06-16 15:32:02 -07004086 /*
David Rientjesb104a352014-07-30 16:08:24 -07004087 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
Vladimir Davydov344736f2014-10-20 15:50:30 +04004088 * comment for __cpuset_node_allowed().
Peter Zijlstra341ce062009-06-16 15:32:02 -07004089 */
4090 alloc_flags &= ~ALLOC_CPUSET;
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08004091 } else if (unlikely(rt_task(current)) && !in_interrupt())
Peter Zijlstra341ce062009-06-16 15:32:02 -07004092 alloc_flags |= ALLOC_HARDER;
4093
Mel Gorman0a79cda2018-12-28 00:35:48 -08004094 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
4095 alloc_flags |= ALLOC_KSWAPD;
4096
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09004097#ifdef CONFIG_CMA
4098 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
4099 alloc_flags |= ALLOC_CMA;
4100#endif
Peter Zijlstra341ce062009-06-16 15:32:02 -07004101 return alloc_flags;
4102}
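
/*
 * Worked example (illustrative sketch; assumes the caller is not a realtime
 * task and CONFIG_CMA is disabled):
 *
 *	gfp_to_alloc_flags(GFP_KERNEL)
 *		== ALLOC_WMARK_MIN | ALLOC_CPUSET | ALLOC_KSWAPD
 *	gfp_to_alloc_flags(GFP_ATOMIC)
 *		== ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER | ALLOC_KSWAPD
 *
 * Note how ALLOC_CPUSET is dropped in the atomic case so that an atomic
 * context is not failed merely because of cpuset constraints.
 */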
4103
Michal Hockocd04ae12017-09-06 16:24:50 -07004104static bool oom_reserves_allowed(struct task_struct *tsk)
Mel Gorman072bb0a2012-07-31 16:43:58 -07004105{
Michal Hockocd04ae12017-09-06 16:24:50 -07004106 if (!tsk_is_oom_victim(tsk))
Vlastimil Babka31a6c192016-07-28 15:49:13 -07004107 return false;
4108
Michal Hockocd04ae12017-09-06 16:24:50 -07004109 /*
4110 * !MMU doesn't have oom reaper so give access to memory reserves
4111 * only to the thread with TIF_MEMDIE set
4112 */
4113 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4114 return false;
Vlastimil Babka31a6c192016-07-28 15:49:13 -07004115
Michal Hockocd04ae12017-09-06 16:24:50 -07004116 return true;
4117}
4118
4119/*
4120 * Distinguish requests which really need access to full memory
4121 * reserves from oom victims which can live with a portion of it
4122 */
4123static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4124{
4125 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4126 return 0;
4127 if (gfp_mask & __GFP_MEMALLOC)
4128 return ALLOC_NO_WATERMARKS;
4129 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4130 return ALLOC_NO_WATERMARKS;
4131 if (!in_interrupt()) {
4132 if (current->flags & PF_MEMALLOC)
4133 return ALLOC_NO_WATERMARKS;
4134 else if (oom_reserves_allowed(current))
4135 return ALLOC_OOM;
4136 }
4137
4138 return 0;
4139}
4140
4141bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4142{
4143 return !!__gfp_pfmemalloc_flags(gfp_mask);
Mel Gorman072bb0a2012-07-31 16:43:58 -07004144}
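
/*
 * Summary of the possible outcomes above (illustrative sketch; assumes
 * process context and no __GFP_NOMEMALLOC):
 *
 *	__GFP_MEMALLOC or PF_MEMALLOC set  -> ALLOC_NO_WATERMARKS
 *						(full access to the reserves)
 *	OOM victim (tsk_is_oom_victim)     -> ALLOC_OOM
 *						(partial access to the reserves)
 *	anything else                      -> 0
 *						(watermarks are respected)
 */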
4145
Michal Hocko0a0337e2016-05-20 16:57:00 -07004146/*
Michal Hocko0a0337e2016-05-20 16:57:00 -07004147 * Checks whether it makes sense to retry the reclaim to make forward progress
4148 * for the given allocation request.
Johannes Weiner491d79a2017-05-03 14:52:16 -07004149 *
4150 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4151 * without success, or when we couldn't even meet the watermark if we
4152 * reclaimed all remaining pages on the LRU lists.
Michal Hocko0a0337e2016-05-20 16:57:00 -07004153 *
4154 * Returns true if a retry is viable or false to enter the oom path.
4155 */
4156static inline bool
4157should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4158 struct alloc_context *ac, int alloc_flags,
Vlastimil Babka423b4522016-10-07 17:00:40 -07004159 bool did_some_progress, int *no_progress_loops)
Michal Hocko0a0337e2016-05-20 16:57:00 -07004160{
4161 struct zone *zone;
4162 struct zoneref *z;
Michal Hocko15f570b2018-10-26 15:03:31 -07004163 bool ret = false;
Michal Hocko0a0337e2016-05-20 16:57:00 -07004164
4165 /*
Vlastimil Babka423b4522016-10-07 17:00:40 -07004166	 * Costly allocations might have made progress but this doesn't mean
4167 * their order will become available due to high fragmentation so
4168 * always increment the no progress counter for them
4169 */
4170 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4171 *no_progress_loops = 0;
4172 else
4173 (*no_progress_loops)++;
4174
4175 /*
Michal Hocko0a0337e2016-05-20 16:57:00 -07004176 * Make sure we converge to OOM if we cannot make any progress
4177 * several times in the row.
4178 */
Minchan Kim04c87162016-12-12 16:42:11 -08004179 if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4180 /* Before OOM, exhaust highatomic_reserve */
Minchan Kim29fac032016-12-12 16:42:14 -08004181 return unreserve_highatomic_pageblock(ac, true);
Minchan Kim04c87162016-12-12 16:42:11 -08004182 }
Michal Hocko0a0337e2016-05-20 16:57:00 -07004183
Michal Hocko0a0337e2016-05-20 16:57:00 -07004184 /*
Mel Gormanbca67592016-07-28 15:47:05 -07004185 * Keep reclaiming pages while there is a chance this will lead
4186 * somewhere. If none of the target zones can satisfy our allocation
4187 * request even if all reclaimable pages are considered then we are
4188 * screwed and have to go OOM.
Michal Hocko0a0337e2016-05-20 16:57:00 -07004189 */
4190 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
4191 ac->nodemask) {
4192 unsigned long available;
Michal Hockoede37712016-05-20 16:57:03 -07004193 unsigned long reclaimable;
Michal Hockod379f012017-02-22 15:42:00 -08004194 unsigned long min_wmark = min_wmark_pages(zone);
4195 bool wmark;
Michal Hocko0a0337e2016-05-20 16:57:00 -07004196
Mel Gorman5a1c84b2016-07-28 15:47:31 -07004197 available = reclaimable = zone_reclaimable_pages(zone);
Mel Gorman5a1c84b2016-07-28 15:47:31 -07004198 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
Michal Hocko0a0337e2016-05-20 16:57:00 -07004199
4200 /*
Johannes Weiner491d79a2017-05-03 14:52:16 -07004201 * Would the allocation succeed if we reclaimed all
4202 * reclaimable pages?
Michal Hocko0a0337e2016-05-20 16:57:00 -07004203 */
Michal Hockod379f012017-02-22 15:42:00 -08004204 wmark = __zone_watermark_ok(zone, order, min_wmark,
4205 ac_classzone_idx(ac), alloc_flags, available);
4206 trace_reclaim_retry_zone(z, order, reclaimable,
4207 available, min_wmark, *no_progress_loops, wmark);
4208 if (wmark) {
Michal Hockoede37712016-05-20 16:57:03 -07004209 /*
4210 * If we didn't make any progress and have a lot of
4211 * dirty + writeback pages then we should wait for
4212 * an IO to complete to slow down the reclaim and
4213	 * prevent premature OOM
4214 */
4215 if (!did_some_progress) {
Mel Gorman11fb9982016-07-28 15:46:20 -07004216 unsigned long write_pending;
Michal Hockoede37712016-05-20 16:57:03 -07004217
Mel Gorman5a1c84b2016-07-28 15:47:31 -07004218 write_pending = zone_page_state_snapshot(zone,
4219 NR_ZONE_WRITE_PENDING);
Michal Hockoede37712016-05-20 16:57:03 -07004220
Mel Gorman11fb9982016-07-28 15:46:20 -07004221 if (2 * write_pending > reclaimable) {
Michal Hockoede37712016-05-20 16:57:03 -07004222 congestion_wait(BLK_RW_ASYNC, HZ/10);
4223 return true;
4224 }
4225 }
Mel Gorman5a1c84b2016-07-28 15:47:31 -07004226
Michal Hocko15f570b2018-10-26 15:03:31 -07004227 ret = true;
4228 goto out;
Michal Hocko0a0337e2016-05-20 16:57:00 -07004229 }
4230 }
4231
Michal Hocko15f570b2018-10-26 15:03:31 -07004232out:
4233 /*
4234 * Memory allocation/reclaim might be called from a WQ context and the
4235 * current implementation of the WQ concurrency control doesn't
4236 * recognize that a particular WQ is congested if the worker thread is
4237 * looping without ever sleeping. Therefore we have to do a short sleep
4238 * here rather than calling cond_resched().
4239 */
4240 if (current->flags & PF_WQ_WORKER)
4241 schedule_timeout_uninterruptible(1);
4242 else
4243 cond_resched();
4244 return ret;
Michal Hocko0a0337e2016-05-20 16:57:00 -07004245}
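
/*
 * Worked example (illustrative sketch): with MAX_RECLAIM_RETRIES (16 in
 * mm/internal.h at the time of writing), a !costly allocation that keeps
 * making progress resets no_progress_loops on every pass and may loop for as
 * long as reclaim delivers pages.  Once progress stops, it gets at most 16
 * further passes (the final one spending the highatomic reserve via
 * unreserve_highatomic_pageblock()) before the caller proceeds to the OOM
 * path.
 */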
4246
Vlastimil Babka902b6282017-07-06 15:39:56 -07004247static inline bool
4248check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4249{
4250 /*
4251 * It's possible that cpuset's mems_allowed and the nodemask from
4252 * mempolicy don't intersect. This should be normally dealt with by
4253 * policy_nodemask(), but it's possible to race with cpuset update in
4254 * such a way the check therein was true, and then it became false
4255 * before we got our cpuset_mems_cookie here.
4256 * This assumes that for all allocations, ac->nodemask can come only
4257 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4258 * when it does not intersect with the cpuset restrictions) or the
4259 * caller can deal with a violated nodemask.
4260 */
4261 if (cpusets_enabled() && ac->nodemask &&
4262 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4263 ac->nodemask = NULL;
4264 return true;
4265 }
4266
4267 /*
4268 * When updating a task's mems_allowed or mempolicy nodemask, it is
4269 * possible to race with parallel threads in such a way that our
4270 * allocation can fail while the mask is being updated. If we are about
4271 * to fail, check if the cpuset changed during allocation and if so,
4272 * retry.
4273 */
4274 if (read_mems_allowed_retry(cpuset_mems_cookie))
4275 return true;
4276
4277 return false;
4278}
4279
Mel Gorman11e33f62009-06-16 15:31:57 -07004280static inline struct page *
4281__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08004282 struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07004283{
Mel Gormand0164ad2015-11-06 16:28:21 -08004284 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
Vlastimil Babka282722b2017-05-08 15:54:49 -07004285 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
Mel Gorman11e33f62009-06-16 15:31:57 -07004286 struct page *page = NULL;
Mel Gormanc6038442016-05-19 17:13:38 -07004287 unsigned int alloc_flags;
Mel Gorman11e33f62009-06-16 15:31:57 -07004288 unsigned long did_some_progress;
Vlastimil Babka5ce9bfe2017-01-24 15:18:38 -08004289 enum compact_priority compact_priority;
Michal Hockoc5d01d02016-05-20 16:56:53 -07004290 enum compact_result compact_result;
Vlastimil Babka5ce9bfe2017-01-24 15:18:38 -08004291 int compaction_retries;
4292 int no_progress_loops;
Vlastimil Babka5ce9bfe2017-01-24 15:18:38 -08004293 unsigned int cpuset_mems_cookie;
Michal Hockocd04ae12017-09-06 16:24:50 -07004294 int reserve_flags;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08004295
Christoph Lameter952f3b52006-12-06 20:33:26 -08004296 /*
Mel Gormand0164ad2015-11-06 16:28:21 -08004297 * We also sanity check to catch abuse of atomic reserves being used by
4298 * callers that are not in atomic context.
4299 */
4300 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4301 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
4302 gfp_mask &= ~__GFP_ATOMIC;
4303
Vlastimil Babka5ce9bfe2017-01-24 15:18:38 -08004304retry_cpuset:
4305 compaction_retries = 0;
4306 no_progress_loops = 0;
4307 compact_priority = DEF_COMPACT_PRIORITY;
4308 cpuset_mems_cookie = read_mems_allowed_begin();
Michal Hocko9a67f642017-02-22 15:46:19 -08004309
4310 /*
4311 * The fast path uses conservative alloc_flags to succeed only until
4312 * kswapd needs to be woken up, and to avoid the cost of setting up
4313 * alloc_flags precisely. So we do that now.
4314 */
4315 alloc_flags = gfp_to_alloc_flags(gfp_mask);
4316
Vlastimil Babkae47483b2017-01-24 15:18:41 -08004317 /*
4318 * We need to recalculate the starting point for the zonelist iterator
4319	 * because we might have used a different nodemask in the fast path, or
4320 * there was a cpuset modification and we are retrying - otherwise we
4321 * could end up iterating over non-eligible zones endlessly.
4322 */
4323 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4324 ac->high_zoneidx, ac->nodemask);
4325 if (!ac->preferred_zoneref->zone)
4326 goto nopage;
4327
Mel Gorman0a79cda2018-12-28 00:35:48 -08004328 if (alloc_flags & ALLOC_KSWAPD)
David Rientjes5ecd9d42018-04-05 16:25:16 -07004329 wake_all_kswapds(order, gfp_mask, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08004330
Paul Jackson9bf22292005-09-06 15:18:12 -07004331 /*
Vlastimil Babka23771232016-07-28 15:49:16 -07004332 * The adjusted alloc_flags might result in immediate success, so try
4333 * that first
4334 */
4335 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4336 if (page)
4337 goto got_pg;
4338
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004339 /*
4340 * For costly allocations, try direct compaction first, as it's likely
Vlastimil Babka282722b2017-05-08 15:54:49 -07004341 * that we have enough base pages and don't need to reclaim. For non-
4342 * movable high-order allocations, do that as well, as compaction will
4343 * try prevent permanent fragmentation by migrating from blocks of the
4344 * same migratetype.
4345 * Don't try this for allocations that are allowed to ignore
4346 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004347 */
Vlastimil Babka282722b2017-05-08 15:54:49 -07004348 if (can_direct_reclaim &&
4349 (costly_order ||
4350 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4351 && !gfp_pfmemalloc_allowed(gfp_mask)) {
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004352 page = __alloc_pages_direct_compact(gfp_mask, order,
4353 alloc_flags, ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07004354 INIT_COMPACT_PRIORITY,
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004355 &compact_result);
4356 if (page)
4357 goto got_pg;
4358
Vlastimil Babka3eb27712016-07-28 15:49:22 -07004359 /*
4360 * Checks for costly allocations with __GFP_NORETRY, which
4361 * includes THP page fault allocations
4362 */
Vlastimil Babka282722b2017-05-08 15:54:49 -07004363 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004364 /*
4365 * If compaction is deferred for high-order allocations,
4366 * it is because sync compaction recently failed. If
4367 * this is the case and the caller requested a THP
4368 * allocation, we do not want to heavily disrupt the
4369 * system, so we fail the allocation instead of entering
4370 * direct reclaim.
4371 */
4372 if (compact_result == COMPACT_DEFERRED)
4373 goto nopage;
4374
4375 /*
Vlastimil Babka3eb27712016-07-28 15:49:22 -07004376 * Looks like reclaim/compaction is worth trying, but
4377 * sync compaction could be very expensive, so keep
Vlastimil Babka25160352016-07-28 15:49:25 -07004378 * using async compaction.
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004379 */
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07004380 compact_priority = INIT_COMPACT_PRIORITY;
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004381 }
4382 }
Vlastimil Babka23771232016-07-28 15:49:16 -07004383
4384retry:
4385 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
Mel Gorman0a79cda2018-12-28 00:35:48 -08004386 if (alloc_flags & ALLOC_KSWAPD)
David Rientjes5ecd9d42018-04-05 16:25:16 -07004387 wake_all_kswapds(order, gfp_mask, ac);
Vlastimil Babka23771232016-07-28 15:49:16 -07004388
Michal Hockocd04ae12017-09-06 16:24:50 -07004389 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4390 if (reserve_flags)
4391 alloc_flags = reserve_flags;
Vlastimil Babka23771232016-07-28 15:49:16 -07004392
4393 /*
Vlastimil Babkad6a24df2018-08-17 15:45:05 -07004394 * Reset the nodemask and zonelist iterators if memory policies can be
4395 * ignored. These allocations are high priority and system rather than
4396 * user oriented.
Mel Gormane46e7b72016-06-03 14:56:01 -07004397 */
Michal Hockocd04ae12017-09-06 16:24:50 -07004398 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
Vlastimil Babkad6a24df2018-08-17 15:45:05 -07004399 ac->nodemask = NULL;
Mel Gormane46e7b72016-06-03 14:56:01 -07004400 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4401 ac->high_zoneidx, ac->nodemask);
4402 }
4403
Vlastimil Babka23771232016-07-28 15:49:16 -07004404 /* Attempt with potentially adjusted zonelist and alloc_flags */
Vlastimil Babka31a6c192016-07-28 15:49:13 -07004405 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08004406 if (page)
4407 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004408
Mel Gormand0164ad2015-11-06 16:28:21 -08004409 /* Caller is not willing to reclaim, we can't balance anything */
Michal Hocko9a67f642017-02-22 15:46:19 -08004410 if (!can_direct_reclaim)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004411 goto nopage;
Michal Hocko9a67f642017-02-22 15:46:19 -08004412
Peter Zijlstra341ce062009-06-16 15:32:02 -07004413 /* Avoid recursion of direct reclaim */
Michal Hocko9a67f642017-02-22 15:46:19 -08004414 if (current->flags & PF_MEMALLOC)
Peter Zijlstra341ce062009-06-16 15:32:02 -07004415 goto nopage;
David Rientjes8fe78042014-08-06 16:07:54 -07004416
Mel Gorman11e33f62009-06-16 15:31:57 -07004417 /* Try direct reclaim and then allocating */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08004418 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4419 &did_some_progress);
Mel Gorman11e33f62009-06-16 15:31:57 -07004420 if (page)
4421 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004422
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004423 /* Try direct compaction and then allocating */
4424 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07004425 compact_priority, &compact_result);
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004426 if (page)
4427 goto got_pg;
4428
Johannes Weiner90839052015-06-24 16:57:21 -07004429 /* Do not loop if specifically requested */
4430 if (gfp_mask & __GFP_NORETRY)
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004431 goto nopage;
Johannes Weiner90839052015-06-24 16:57:21 -07004432
Michal Hocko0a0337e2016-05-20 16:57:00 -07004433 /*
4434 * Do not retry costly high order allocations unless they are
Michal Hockodcda9b02017-07-12 14:36:45 -07004435 * __GFP_RETRY_MAYFAIL
Michal Hocko0a0337e2016-05-20 16:57:00 -07004436 */
Michal Hockodcda9b02017-07-12 14:36:45 -07004437 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004438 goto nopage;
Michal Hocko0a0337e2016-05-20 16:57:00 -07004439
Michal Hocko0a0337e2016-05-20 16:57:00 -07004440 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
Vlastimil Babka423b4522016-10-07 17:00:40 -07004441 did_some_progress > 0, &no_progress_loops))
Michal Hocko0a0337e2016-05-20 16:57:00 -07004442 goto retry;
4443
Michal Hocko33c2d212016-05-20 16:57:06 -07004444 /*
4445 * It doesn't make any sense to retry for the compaction if the order-0
4446 * reclaim is not able to make any progress because the current
4447 * implementation of the compaction depends on the sufficient amount
4448 * of free memory (see __compaction_suitable)
4449 */
4450 if (did_some_progress > 0 &&
Michal Hocko86a294a2016-05-20 16:57:12 -07004451 should_compact_retry(ac, order, alloc_flags,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07004452 compact_result, &compact_priority,
Vlastimil Babkad9436492016-10-07 17:00:31 -07004453 &compaction_retries))
Michal Hocko33c2d212016-05-20 16:57:06 -07004454 goto retry;
4455
Vlastimil Babka902b6282017-07-06 15:39:56 -07004456
4457 /* Deal with possible cpuset update races before we start OOM killing */
4458 if (check_retry_cpuset(cpuset_mems_cookie, ac))
Vlastimil Babkae47483b2017-01-24 15:18:41 -08004459 goto retry_cpuset;
4460
Johannes Weiner90839052015-06-24 16:57:21 -07004461 /* Reclaim has failed us, start killing things */
4462 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4463 if (page)
4464 goto got_pg;
4465
Michal Hocko9a67f642017-02-22 15:46:19 -08004466 /* Avoid allocations with no watermarks from looping endlessly */
Michal Hockocd04ae12017-09-06 16:24:50 -07004467 if (tsk_is_oom_victim(current) &&
4468 (alloc_flags == ALLOC_OOM ||
Tetsuo Handac2889832017-06-02 14:46:31 -07004469 (gfp_mask & __GFP_NOMEMALLOC)))
Michal Hocko9a67f642017-02-22 15:46:19 -08004470 goto nopage;
4471
Johannes Weiner90839052015-06-24 16:57:21 -07004472 /* Retry as long as the OOM killer is making progress */
Michal Hocko0a0337e2016-05-20 16:57:00 -07004473 if (did_some_progress) {
4474 no_progress_loops = 0;
Johannes Weiner90839052015-06-24 16:57:21 -07004475 goto retry;
Michal Hocko0a0337e2016-05-20 16:57:00 -07004476 }
Johannes Weiner90839052015-06-24 16:57:21 -07004477
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478nopage:
Vlastimil Babka902b6282017-07-06 15:39:56 -07004479 /* Deal with possible cpuset update races before we fail */
4480 if (check_retry_cpuset(cpuset_mems_cookie, ac))
Vlastimil Babka5ce9bfe2017-01-24 15:18:38 -08004481 goto retry_cpuset;
4482
Michal Hocko9a67f642017-02-22 15:46:19 -08004483 /*
4484 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
4485 * we always retry
4486 */
4487 if (gfp_mask & __GFP_NOFAIL) {
4488 /*
4489	 * All existing users of __GFP_NOFAIL are blockable, so warn
4490 * of any new users that actually require GFP_NOWAIT
4491 */
4492 if (WARN_ON_ONCE(!can_direct_reclaim))
4493 goto fail;
4494
4495 /*
4496	 * A PF_MEMALLOC request from this context is rather bizarre
4497	 * because we cannot reclaim anything and can only loop waiting
4498	 * for somebody to do the work for us
4499 */
4500 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4501
4502 /*
4503	 * Non-failing costly orders are a hard requirement which we
4504	 * are not well prepared for, so let's warn about these users
4505	 * so that we can identify them and convert them to something
4506	 * else.
4507 */
4508 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
4509
Michal Hocko6c18ba72017-02-22 15:46:25 -08004510 /*
4511 * Help non-failing allocations by giving them access to memory
4512 * reserves but do not use ALLOC_NO_WATERMARKS because this
4513 * could deplete whole memory reserves which would just make
4514 * the situation worse
4515 */
4516 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4517 if (page)
4518 goto got_pg;
4519
Michal Hocko9a67f642017-02-22 15:46:19 -08004520 cond_resched();
4521 goto retry;
4522 }
4523fail:
Michal Hockoa8e99252017-02-22 15:46:10 -08004524 warn_alloc(gfp_mask, ac->nodemask,
Michal Hocko7877cdc2016-10-07 17:01:55 -07004525 "page allocation failure: order:%u", order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004526got_pg:
Mel Gorman072bb0a2012-07-31 16:43:58 -07004527 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004528}
Mel Gorman11e33f62009-06-16 15:31:57 -07004529
Mel Gorman9cd75552017-02-24 14:56:29 -08004530static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
Vlastimil Babka04ec6262017-07-06 15:40:03 -07004531 int preferred_nid, nodemask_t *nodemask,
Mel Gorman9cd75552017-02-24 14:56:29 -08004532 struct alloc_context *ac, gfp_t *alloc_mask,
4533 unsigned int *alloc_flags)
4534{
4535 ac->high_zoneidx = gfp_zone(gfp_mask);
Vlastimil Babka04ec6262017-07-06 15:40:03 -07004536 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
Mel Gorman9cd75552017-02-24 14:56:29 -08004537 ac->nodemask = nodemask;
4538 ac->migratetype = gfpflags_to_migratetype(gfp_mask);
4539
4540 if (cpusets_enabled()) {
4541 *alloc_mask |= __GFP_HARDWALL;
Mel Gorman9cd75552017-02-24 14:56:29 -08004542 if (!ac->nodemask)
4543 ac->nodemask = &cpuset_current_mems_allowed;
Vlastimil Babka51047822017-02-24 14:56:53 -08004544 else
4545 *alloc_flags |= ALLOC_CPUSET;
Mel Gorman9cd75552017-02-24 14:56:29 -08004546 }
4547
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004548 fs_reclaim_acquire(gfp_mask);
4549 fs_reclaim_release(gfp_mask);
Mel Gorman9cd75552017-02-24 14:56:29 -08004550
4551 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
4552
4553 if (should_fail_alloc_page(gfp_mask, order))
4554 return false;
4555
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09004556 if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
4557 *alloc_flags |= ALLOC_CMA;
4558
Mel Gorman9cd75552017-02-24 14:56:29 -08004559 return true;
4560}
4561
4562/* Determine whether to spread dirty pages and find the first usable zone */
Huaisheng Yea380b402018-06-07 17:07:57 -07004563static inline void finalise_ac(gfp_t gfp_mask, struct alloc_context *ac)
Mel Gorman9cd75552017-02-24 14:56:29 -08004564{
4565 /* Dirty zone balancing only done in the fast path */
4566 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4567
4568 /*
4569 * The preferred zone is used for statistics but crucially it is
4570 * also used as the starting point for the zonelist iterator. It
4571 * may get reset for allocations that ignore memory policies.
4572 */
4573 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4574 ac->high_zoneidx, ac->nodemask);
4575}
4576
Mel Gorman11e33f62009-06-16 15:31:57 -07004577/*
4578 * This is the 'heart' of the zoned buddy allocator.
4579 */
4580struct page *
Vlastimil Babka04ec6262017-07-06 15:40:03 -07004581__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
4582 nodemask_t *nodemask)
Mel Gorman11e33f62009-06-16 15:31:57 -07004583{
Mel Gorman5bb1b162016-05-19 17:13:50 -07004584 struct page *page;
Mel Gormane6cbd7f2016-07-28 15:46:50 -07004585 unsigned int alloc_flags = ALLOC_WMARK_LOW;
Tetsuo Handaf19360f2017-09-08 16:13:22 -07004586 gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
Mel Gorman9cd75552017-02-24 14:56:29 -08004587 struct alloc_context ac = { };
Mel Gorman682a3382016-05-19 17:13:30 -07004588
Michal Hockoc63ae432018-11-16 15:08:53 -08004589 /*
4590 * There are several places where we assume that the order value is sane
4591 * so bail out early if the request is out of bound.
4592 */
4593 if (unlikely(order >= MAX_ORDER)) {
4594 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
4595 return NULL;
4596 }
4597
Benjamin Herrenschmidtdcce2842009-06-18 13:24:12 +10004598 gfp_mask &= gfp_allowed_mask;
Tetsuo Handaf19360f2017-09-08 16:13:22 -07004599 alloc_mask = gfp_mask;
Vlastimil Babka04ec6262017-07-06 15:40:03 -07004600 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
Mel Gorman11e33f62009-06-16 15:31:57 -07004601 return NULL;
4602
Huaisheng Yea380b402018-06-07 17:07:57 -07004603 finalise_ac(gfp_mask, &ac);
Mel Gorman5bb1b162016-05-19 17:13:50 -07004604
Mel Gorman6bb15452018-12-28 00:35:41 -08004605 /*
4606 * Forbid the first pass from falling back to types that fragment
4607 * memory until all local zones are considered.
4608 */
Mel Gorman0a79cda2018-12-28 00:35:48 -08004609 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
Mel Gorman6bb15452018-12-28 00:35:41 -08004610
Mel Gorman5117f452009-06-16 15:31:59 -07004611 /* First allocation attempt */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08004612 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
Mel Gorman4fcb0972016-05-19 17:14:01 -07004613 if (likely(page))
4614 goto out;
Andrew Morton91fbdc02015-02-11 15:25:04 -08004615
Mel Gorman4fcb0972016-05-19 17:14:01 -07004616 /*
Michal Hocko7dea19f2017-05-03 14:53:15 -07004617 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4618	 * and GFP_NOIO, which have to be inherited for all allocation requests
4619 * from a particular context which has been marked by
4620 * memalloc_no{fs,io}_{save,restore}.
Mel Gorman4fcb0972016-05-19 17:14:01 -07004621 */
Michal Hocko7dea19f2017-05-03 14:53:15 -07004622 alloc_mask = current_gfp_context(gfp_mask);
Mel Gorman4fcb0972016-05-19 17:14:01 -07004623 ac.spread_dirty_pages = false;
Mel Gorman11e33f62009-06-16 15:31:57 -07004624
Mel Gorman47415262016-05-19 17:14:44 -07004625 /*
4626 * Restore the original nodemask if it was potentially replaced with
4627 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4628 */
Vlastimil Babkae47483b2017-01-24 15:18:41 -08004629 if (unlikely(ac.nodemask != nodemask))
Mel Gorman47415262016-05-19 17:14:44 -07004630 ac.nodemask = nodemask;
Vlastimil Babka16096c22017-01-24 15:18:35 -08004631
Mel Gorman4fcb0972016-05-19 17:14:01 -07004632 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
Xishi Qiu23f086f2015-02-11 15:25:07 -08004633
Mel Gorman4fcb0972016-05-19 17:14:01 -07004634out:
Vladimir Davydovc4159a72016-08-08 23:03:12 +03004635 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
Shakeel Butt60cd4bc2019-03-05 15:43:13 -08004636 unlikely(__memcg_kmem_charge(page, gfp_mask, order) != 0)) {
Vladimir Davydovc4159a72016-08-08 23:03:12 +03004637 __free_pages(page, order);
4638 page = NULL;
Vladimir Davydov49491482016-07-26 15:24:24 -07004639 }
4640
Mel Gorman4fcb0972016-05-19 17:14:01 -07004641 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
4642
Mel Gorman11e33f62009-06-16 15:31:57 -07004643 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004644}
Mel Gormand2391712009-06-16 15:31:52 -07004645EXPORT_SYMBOL(__alloc_pages_nodemask);
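
/*
 * Usage sketch (illustrative): callers normally reach this function through
 * the wrappers in <linux/gfp.h> rather than calling it directly, e.g.
 *
 *	struct page *page;
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 2);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);
 *
 * Order 2 gives four contiguous zeroed pages; the order passed to
 * __free_pages() must match the one used at allocation time.
 */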
Linus Torvalds1da177e2005-04-16 15:20:36 -07004646
4647/*
Michal Hocko9ea9a682018-08-17 15:46:01 -07004648 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
4649 * address cannot represent highmem pages. Use alloc_pages and then kmap if
4650 * you need to access high mem.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004651 */
Harvey Harrison920c7a52008-02-04 22:29:26 -08004652unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004653{
Akinobu Mita945a1112009-09-21 17:01:47 -07004654 struct page *page;
4655
Michal Hocko9ea9a682018-08-17 15:46:01 -07004656 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004657 if (!page)
4658 return 0;
4659 return (unsigned long) page_address(page);
4660}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661EXPORT_SYMBOL(__get_free_pages);
4662
Harvey Harrison920c7a52008-02-04 22:29:26 -08004663unsigned long get_zeroed_page(gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664{
Akinobu Mita945a1112009-09-21 17:01:47 -07004665 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004666}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004667EXPORT_SYMBOL(get_zeroed_page);
4668
Aaron Lu742aa7f2018-12-28 00:35:22 -08004669static inline void free_the_page(struct page *page, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670{
Aaron Lu742aa7f2018-12-28 00:35:22 -08004671 if (order == 0) /* Via pcp? */
4672 free_unref_page(page);
4673 else
4674 __free_pages_ok(page, order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004675}
4676
Aaron Lu742aa7f2018-12-28 00:35:22 -08004677void __free_pages(struct page *page, unsigned int order)
4678{
4679 if (put_page_testzero(page))
4680 free_the_page(page, order);
4681}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682EXPORT_SYMBOL(__free_pages);
4683
Harvey Harrison920c7a52008-02-04 22:29:26 -08004684void free_pages(unsigned long addr, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004685{
4686 if (addr != 0) {
Nick Piggin725d7042006-09-25 23:30:55 -07004687 VM_BUG_ON(!virt_addr_valid((void *)addr));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004688 __free_pages(virt_to_page((void *)addr), order);
4689 }
4690}
4691
4692EXPORT_SYMBOL(free_pages);
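
/*
 * Usage sketch (illustrative): these helpers deal in kernel virtual
 * addresses rather than struct page pointers, e.g.
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 1);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages(buf, 1);
 *
 * The order passed to free_pages() must match the order of the allocation.
 */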
4693
Glauber Costa6a1a0d32012-12-18 14:22:00 -08004694/*
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004695 * Page Fragment:
4696 * An arbitrary-length arbitrary-offset area of memory which resides
4697 * within a 0 or higher order page. Multiple fragments within that page
4698 * are individually refcounted, in the page's reference counter.
4699 *
4700 * The page_frag functions below provide a simple allocation framework for
4701 * page fragments. This is used by the network stack and network device
4702 * drivers to provide a backing region of memory for use as either an
4703 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4704 */
Alexander Duyck2976db82017-01-10 16:58:09 -08004705static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4706 gfp_t gfp_mask)
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004707{
4708 struct page *page = NULL;
4709 gfp_t gfp = gfp_mask;
4710
4711#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4712 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4713 __GFP_NOMEMALLOC;
4714 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4715 PAGE_FRAG_CACHE_MAX_ORDER);
4716 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4717#endif
4718 if (unlikely(!page))
4719 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4720
4721 nc->va = page ? page_address(page) : NULL;
4722
4723 return page;
4724}
4725
Alexander Duyck2976db82017-01-10 16:58:09 -08004726void __page_frag_cache_drain(struct page *page, unsigned int count)
Alexander Duyck44fdffd2016-12-14 15:05:26 -08004727{
4728 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4729
Aaron Lu742aa7f2018-12-28 00:35:22 -08004730 if (page_ref_sub_and_test(page, count))
4731 free_the_page(page, compound_order(page));
Alexander Duyck44fdffd2016-12-14 15:05:26 -08004732}
Alexander Duyck2976db82017-01-10 16:58:09 -08004733EXPORT_SYMBOL(__page_frag_cache_drain);
Alexander Duyck44fdffd2016-12-14 15:05:26 -08004734
Alexander Duyck8c2dd3e2017-01-10 16:58:06 -08004735void *page_frag_alloc(struct page_frag_cache *nc,
4736 unsigned int fragsz, gfp_t gfp_mask)
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004737{
4738 unsigned int size = PAGE_SIZE;
4739 struct page *page;
4740 int offset;
4741
4742 if (unlikely(!nc->va)) {
4743refill:
Alexander Duyck2976db82017-01-10 16:58:09 -08004744 page = __page_frag_cache_refill(nc, gfp_mask);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004745 if (!page)
4746 return NULL;
4747
4748#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4749 /* if size can vary use size else just use PAGE_SIZE */
4750 size = nc->size;
4751#endif
4752 /* Even if we own the page, we do not use atomic_set().
4753 * This would break get_page_unless_zero() users.
4754 */
Alexander Duyck86447722019-02-15 14:44:12 -08004755 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004756
4757 /* reset page count bias and offset to start of new frag */
Michal Hocko2f064f32015-08-21 14:11:51 -07004758 nc->pfmemalloc = page_is_pfmemalloc(page);
Alexander Duyck86447722019-02-15 14:44:12 -08004759 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004760 nc->offset = size;
4761 }
4762
4763 offset = nc->offset - fragsz;
4764 if (unlikely(offset < 0)) {
4765 page = virt_to_page(nc->va);
4766
Joonsoo Kimfe896d12016-03-17 14:19:26 -07004767 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004768 goto refill;
4769
4770#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4771 /* if size can vary use size else just use PAGE_SIZE */
4772 size = nc->size;
4773#endif
4774 /* OK, page count is 0, we can safely set it */
Alexander Duyck86447722019-02-15 14:44:12 -08004775 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004776
4777 /* reset page count bias and offset to start of new frag */
Alexander Duyck86447722019-02-15 14:44:12 -08004778 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004779 offset = size - fragsz;
4780 }
4781
4782 nc->pagecnt_bias--;
4783 nc->offset = offset;
4784
4785 return nc->va + offset;
4786}
Alexander Duyck8c2dd3e2017-01-10 16:58:06 -08004787EXPORT_SYMBOL(page_frag_alloc);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004788
4789/*
4790 * Frees a page fragment allocated out of either a compound or order 0 page.
4791 */
Alexander Duyck8c2dd3e2017-01-10 16:58:06 -08004792void page_frag_free(void *addr)
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004793{
4794 struct page *page = virt_to_head_page(addr);
4795
Aaron Lu742aa7f2018-12-28 00:35:22 -08004796 if (unlikely(put_page_testzero(page)))
4797 free_the_page(page, compound_order(page));
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004798}
Alexander Duyck8c2dd3e2017-01-10 16:58:06 -08004799EXPORT_SYMBOL(page_frag_free);
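/*
 * A minimal sketch of the page_frag API above (names are illustrative): a
 * zero-initialised cache hands out small fragments, and the backing page is
 * only freed once every fragment has been released.  Real users such as the
 * network stack keep one cache per CPU, since access to the cache itself is
 * not serialised here.
 */
static struct page_frag_cache example_frag_cache;	/* ->va starts out NULL */

static void * __maybe_unused frag_alloc_example(unsigned int len)
{
	return page_frag_alloc(&example_frag_cache, len, GFP_ATOMIC);
}

static void __maybe_unused frag_free_example(void *frag)
{
	page_frag_free(frag);	/* drops one reference on the backing page */
}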
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004800
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08004801static void *make_alloc_exact(unsigned long addr, unsigned int order,
4802 size_t size)
Andi Kleenee85c2e2011-05-11 15:13:34 -07004803{
4804 if (addr) {
4805 unsigned long alloc_end = addr + (PAGE_SIZE << order);
4806 unsigned long used = addr + PAGE_ALIGN(size);
4807
4808 split_page(virt_to_page((void *)addr), order);
4809 while (used < alloc_end) {
4810 free_page(used);
4811 used += PAGE_SIZE;
4812 }
4813 }
4814 return (void *)addr;
4815}
4816
Timur Tabi2be0ffe2008-07-23 21:28:11 -07004817/**
4818 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
4819 * @size: the number of bytes to allocate
4820 * @gfp_mask: GFP flags for the allocation
4821 *
4822 * This function is similar to alloc_pages(), except that it allocates the
4823 * minimum number of pages to satisfy the request. alloc_pages() can only
4824 * allocate memory in power-of-two pages.
4825 *
4826 * This function is also limited by MAX_ORDER.
4827 *
4828 * Memory allocated by this function must be released by free_pages_exact().
4829 */
4830void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4831{
4832 unsigned int order = get_order(size);
4833 unsigned long addr;
4834
4835 addr = __get_free_pages(gfp_mask, order);
Andi Kleenee85c2e2011-05-11 15:13:34 -07004836 return make_alloc_exact(addr, order, size);
Timur Tabi2be0ffe2008-07-23 21:28:11 -07004837}
4838EXPORT_SYMBOL(alloc_pages_exact);
4839
4840/**
Andi Kleenee85c2e2011-05-11 15:13:34 -07004841 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4842 * pages on a node.
Randy Dunlapb5e6ab52011-05-16 13:16:54 -07004843 * @nid: the preferred node ID where memory should be allocated
Andi Kleenee85c2e2011-05-11 15:13:34 -07004844 * @size: the number of bytes to allocate
4845 * @gfp_mask: GFP flags for the allocation
4846 *
4847 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
4848 * back.
Andi Kleenee85c2e2011-05-11 15:13:34 -07004849 */
Fabian Fredericke1931812014-08-06 16:04:59 -07004850void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
Andi Kleenee85c2e2011-05-11 15:13:34 -07004851{
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08004852 unsigned int order = get_order(size);
Andi Kleenee85c2e2011-05-11 15:13:34 -07004853 struct page *p = alloc_pages_node(nid, gfp_mask, order);
4854 if (!p)
4855 return NULL;
4856 return make_alloc_exact((unsigned long)page_address(p), order, size);
4857}
Andi Kleenee85c2e2011-05-11 15:13:34 -07004858
4859/**
Timur Tabi2be0ffe2008-07-23 21:28:11 -07004860 * free_pages_exact - release memory allocated via alloc_pages_exact()
4861 * @virt: the value returned by alloc_pages_exact.
4862 * @size: size of allocation, same value as passed to alloc_pages_exact().
4863 *
4864 * Release the memory allocated by a previous call to alloc_pages_exact.
4865 */
4866void free_pages_exact(void *virt, size_t size)
4867{
4868 unsigned long addr = (unsigned long)virt;
4869 unsigned long end = addr + PAGE_ALIGN(size);
4870
4871 while (addr < end) {
4872 free_page(addr);
4873 addr += PAGE_SIZE;
4874 }
4875}
4876EXPORT_SYMBOL(free_pages_exact);
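/*
 * A minimal sketch of the exact-size helpers above (names are illustrative):
 * the caller works in bytes and never sees the underlying order.  For
 * size = 5 * PAGE_SIZE an order-3 block (8 pages) is allocated and the
 * trailing 3 pages are immediately returned by make_alloc_exact().
 */
static void * __maybe_unused exact_alloc_example(size_t size)
{
	return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
}

static void __maybe_unused exact_free_example(void *buf, size_t size)
{
	free_pages_exact(buf, size);	/* must pass the same size */
}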
4877
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004878/**
4879 * nr_free_zone_pages - count number of pages beyond high watermark
4880 * @offset: The zone index of the highest zone
4881 *
4882 * nr_free_zone_pages() counts the number of pages which are beyond the
4883 * high watermark within all zones at or below a given zone index. For each
4884 * zone, the number of pages is calculated as:
mchehab@s-opensource.com0e056eb2017-03-30 17:11:36 -03004885 *
4886 * nr_free_zone_pages = managed_pages - high_pages
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004887 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004888static unsigned long nr_free_zone_pages(int offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004889{
Mel Gormandd1a2392008-04-28 02:12:17 -07004890 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07004891 struct zone *zone;
4892
Martin J. Blighe310fd42005-07-29 22:59:18 -07004893 /* Just pick one node, since fallback list is circular */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004894 unsigned long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895
Mel Gorman0e884602008-04-28 02:12:14 -07004896 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004897
Mel Gorman54a6eb52008-04-28 02:12:16 -07004898 for_each_zone_zonelist(zone, z, zonelist, offset) {
Arun KS9705bea2018-12-28 00:34:24 -08004899 unsigned long size = zone_managed_pages(zone);
Mel Gorman41858962009-06-16 15:32:12 -07004900 unsigned long high = high_wmark_pages(zone);
Martin J. Blighe310fd42005-07-29 22:59:18 -07004901 if (size > high)
4902 sum += size - high;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004903 }
4904
4905 return sum;
4906}
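/*
 * Worked example (illustrative): if the local node has two populated zones
 * at or below @offset, one 1000 pages above its high watermark and the other
 * 250 pages above, nr_free_zone_pages() returns 1250.
 */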
4907
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004908/**
4909 * nr_free_buffer_pages - count number of pages beyond high watermark
4910 *
4911 * nr_free_buffer_pages() counts the number of pages which are beyond the high
4912 * watermark within ZONE_DMA and ZONE_NORMAL.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004913 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004914unsigned long nr_free_buffer_pages(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004915{
Al Viroaf4ca452005-10-21 02:55:38 -04004916 return nr_free_zone_pages(gfp_zone(GFP_USER));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004917}
Meelap Shahc2f1a552007-07-17 04:04:39 -07004918EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004919
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004920/**
4921 * nr_free_pagecache_pages - count number of pages beyond high watermark
4922 *
4923 * nr_free_pagecache_pages() counts the number of pages which are beyond the
4924 * high watermark within all zones.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004925 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004926unsigned long nr_free_pagecache_pages(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004927{
Mel Gorman2a1e2742007-07-17 04:03:12 -07004928 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004929}
Christoph Lameter08e0f6a2006-09-27 01:50:06 -07004930
4931static inline void show_node(struct zone *zone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004932{
Kirill A. Shutemove5adfff2012-12-11 16:00:29 -08004933 if (IS_ENABLED(CONFIG_NUMA))
Andy Whitcroft25ba77c2006-12-06 20:33:03 -08004934 printk("Node %d ", zone_to_nid(zone));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004935}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004936
Igor Redkod02bd272016-03-17 14:19:05 -07004937long si_mem_available(void)
4938{
4939 long available;
4940 unsigned long pagecache;
4941 unsigned long wmark_low = 0;
4942 unsigned long pages[NR_LRU_LISTS];
Vlastimil Babkab29940c2018-10-26 15:05:46 -07004943 unsigned long reclaimable;
Igor Redkod02bd272016-03-17 14:19:05 -07004944 struct zone *zone;
4945 int lru;
4946
4947 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
Mel Gorman2f95ff92016-08-11 15:32:57 -07004948 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
Igor Redkod02bd272016-03-17 14:19:05 -07004949
4950 for_each_zone(zone)
Mel Gormana9214442018-12-28 00:35:44 -08004951 wmark_low += low_wmark_pages(zone);
Igor Redkod02bd272016-03-17 14:19:05 -07004952
4953 /*
4954 * Estimate the amount of memory available for userspace allocations,
4955 * without causing swapping.
4956 */
Michal Hockoc41f0122017-09-06 16:23:36 -07004957 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
Igor Redkod02bd272016-03-17 14:19:05 -07004958
4959 /*
4960 * Not all the page cache can be freed, otherwise the system will
4961 * start swapping. Assume at least half of the page cache, or the
4962 * low watermark worth of cache, needs to stay.
4963 */
4964 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
4965 pagecache -= min(pagecache / 2, wmark_low);
4966 available += pagecache;
4967
4968 /*
Vlastimil Babkab29940c2018-10-26 15:05:46 -07004969 * Part of the reclaimable slab and other kernel memory consists of
4970 * items that are in use, and cannot be freed. Cap this estimate at the
4971 * low watermark.
Igor Redkod02bd272016-03-17 14:19:05 -07004972 */
Vlastimil Babkab29940c2018-10-26 15:05:46 -07004973 reclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE) +
4974 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
4975 available += reclaimable - min(reclaimable / 2, wmark_low);
Roman Gushchin034ebf62018-04-10 16:27:40 -07004976
Igor Redkod02bd272016-03-17 14:19:05 -07004977 if (available < 0)
4978 available = 0;
4979 return available;
4980}
4981EXPORT_SYMBOL_GPL(si_mem_available);
4982
Linus Torvalds1da177e2005-04-16 15:20:36 -07004983void si_meminfo(struct sysinfo *val)
4984{
Arun KSca79b0c2018-12-28 00:34:29 -08004985 val->totalram = totalram_pages();
Mel Gorman11fb9982016-07-28 15:46:20 -07004986 val->sharedram = global_node_page_state(NR_SHMEM);
Michal Hockoc41f0122017-09-06 16:23:36 -07004987 val->freeram = global_zone_page_state(NR_FREE_PAGES);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004988 val->bufferram = nr_blockdev_pages();
Arun KSca79b0c2018-12-28 00:34:29 -08004989 val->totalhigh = totalhigh_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004990 val->freehigh = nr_free_highpages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004991 val->mem_unit = PAGE_SIZE;
4992}
4993
4994EXPORT_SYMBOL(si_meminfo);
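/*
 * A minimal sketch of the reporting helpers above (the function name is
 * illustrative): si_meminfo() fills a struct sysinfo in units of
 * val->mem_unit (PAGE_SIZE here), and si_mem_available() estimates how much
 * memory could be handed to userspace without causing swapping.
 */
static void __maybe_unused meminfo_report_example(void)
{
	struct sysinfo si;

	si_meminfo(&si);
	pr_info("total %lu kB, free %lu kB, available %ld kB\n",
		si.totalram << (PAGE_SHIFT - 10),
		si.freeram << (PAGE_SHIFT - 10),
		si_mem_available() << (PAGE_SHIFT - 10));
}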
4995
4996#ifdef CONFIG_NUMA
4997void si_meminfo_node(struct sysinfo *val, int nid)
4998{
Jiang Liucdd91a72013-07-03 15:03:27 -07004999 int zone_type; /* needs to be signed */
5000 unsigned long managed_pages = 0;
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07005001 unsigned long managed_highpages = 0;
5002 unsigned long free_highpages = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003 pg_data_t *pgdat = NODE_DATA(nid);
5004
Jiang Liucdd91a72013-07-03 15:03:27 -07005005 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
Arun KS9705bea2018-12-28 00:34:24 -08005006 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
Jiang Liucdd91a72013-07-03 15:03:27 -07005007 val->totalram = managed_pages;
Mel Gorman11fb9982016-07-28 15:46:20 -07005008 val->sharedram = node_page_state(pgdat, NR_SHMEM);
Mel Gorman75ef7182016-07-28 15:45:24 -07005009 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07005010#ifdef CONFIG_HIGHMEM
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07005011 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
5012 struct zone *zone = &pgdat->node_zones[zone_type];
5013
5014 if (is_highmem(zone)) {
Arun KS9705bea2018-12-28 00:34:24 -08005015 managed_highpages += zone_managed_pages(zone);
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07005016 free_highpages += zone_page_state(zone, NR_FREE_PAGES);
5017 }
5018 }
5019 val->totalhigh = managed_highpages;
5020 val->freehigh = free_highpages;
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07005021#else
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07005022 val->totalhigh = managed_highpages;
5023 val->freehigh = free_highpages;
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07005024#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005025 val->mem_unit = PAGE_SIZE;
5026}
5027#endif
5028
David Rientjesddd588b2011-03-22 16:30:46 -07005029/*
David Rientjes7bf02ea2011-05-24 17:11:16 -07005030 * Determine whether the node should be displayed or not, depending on whether
5031 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
David Rientjesddd588b2011-03-22 16:30:46 -07005032 */
Michal Hocko9af744d2017-02-22 15:46:16 -08005033static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
David Rientjesddd588b2011-03-22 16:30:46 -07005034{
David Rientjesddd588b2011-03-22 16:30:46 -07005035 if (!(flags & SHOW_MEM_FILTER_NODES))
Michal Hocko9af744d2017-02-22 15:46:16 -08005036 return false;
David Rientjesddd588b2011-03-22 16:30:46 -07005037
Michal Hocko9af744d2017-02-22 15:46:16 -08005038 /*
5039 * no node mask - aka implicit memory numa policy. Do not bother with
5040 * the synchronization - read_mems_allowed_begin - because we do not
5041 * have to be precise here.
5042 */
5043 if (!nodemask)
5044 nodemask = &cpuset_current_mems_allowed;
5045
5046 return !node_isset(nid, *nodemask);
David Rientjesddd588b2011-03-22 16:30:46 -07005047}
5048
Linus Torvalds1da177e2005-04-16 15:20:36 -07005049#define K(x) ((x) << (PAGE_SHIFT-10))
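/* For example, with 4 KiB pages K(256) == 1024: 256 pages reported as 1024 kB. */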
5050
Rabin Vincent377e4f12012-12-11 16:00:24 -08005051static void show_migration_types(unsigned char type)
5052{
5053 static const char types[MIGRATE_TYPES] = {
5054 [MIGRATE_UNMOVABLE] = 'U',
Rabin Vincent377e4f12012-12-11 16:00:24 -08005055 [MIGRATE_MOVABLE] = 'M',
Vlastimil Babka475a2f92015-12-11 13:40:29 -08005056 [MIGRATE_RECLAIMABLE] = 'E',
5057 [MIGRATE_HIGHATOMIC] = 'H',
Rabin Vincent377e4f12012-12-11 16:00:24 -08005058#ifdef CONFIG_CMA
5059 [MIGRATE_CMA] = 'C',
5060#endif
Minchan Kim194159f2013-02-22 16:33:58 -08005061#ifdef CONFIG_MEMORY_ISOLATION
Rabin Vincent377e4f12012-12-11 16:00:24 -08005062 [MIGRATE_ISOLATE] = 'I',
Minchan Kim194159f2013-02-22 16:33:58 -08005063#endif
Rabin Vincent377e4f12012-12-11 16:00:24 -08005064 };
5065 char tmp[MIGRATE_TYPES + 1];
5066 char *p = tmp;
5067 int i;
5068
5069 for (i = 0; i < MIGRATE_TYPES; i++) {
5070 if (type & (1 << i))
5071 *p++ = types[i];
5072 }
5073
5074 *p = '\0';
Joe Perches1f84a182016-10-27 17:46:29 -07005075 printk(KERN_CONT "(%s) ", tmp);
Rabin Vincent377e4f12012-12-11 16:00:24 -08005076}
5077
Linus Torvalds1da177e2005-04-16 15:20:36 -07005078/*
5079 * Show free area list (used inside shift_scroll-lock stuff)
5080 * We also calculate the percentage fragmentation. We do this by counting the
5081 * memory on each free list with the exception of the first item on the list.
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005082 *
5083 * Bits in @filter:
5084 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
5085 * cpuset.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005086 */
Michal Hocko9af744d2017-02-22 15:46:16 -08005087void show_free_areas(unsigned int filter, nodemask_t *nodemask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005088{
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005089 unsigned long free_pcp = 0;
Jes Sorensenc7241912006-09-27 01:50:05 -07005090 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005091 struct zone *zone;
Mel Gorman599d0c92016-07-28 15:45:31 -07005092 pg_data_t *pgdat;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005093
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07005094 for_each_populated_zone(zone) {
Michal Hocko9af744d2017-02-22 15:46:16 -08005095 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
David Rientjesddd588b2011-03-22 16:30:46 -07005096 continue;
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005097
Konstantin Khlebnikov761b0672015-04-14 15:45:32 -07005098 for_each_online_cpu(cpu)
5099 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005100 }
5101
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07005102 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
5103 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005104 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
5105 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07005106 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005107 " free:%lu free_pcp:%lu free_cma:%lu\n",
Mel Gorman599d0c92016-07-28 15:45:31 -07005108 global_node_page_state(NR_ACTIVE_ANON),
5109 global_node_page_state(NR_INACTIVE_ANON),
5110 global_node_page_state(NR_ISOLATED_ANON),
5111 global_node_page_state(NR_ACTIVE_FILE),
5112 global_node_page_state(NR_INACTIVE_FILE),
5113 global_node_page_state(NR_ISOLATED_FILE),
5114 global_node_page_state(NR_UNEVICTABLE),
Mel Gorman11fb9982016-07-28 15:46:20 -07005115 global_node_page_state(NR_FILE_DIRTY),
5116 global_node_page_state(NR_WRITEBACK),
5117 global_node_page_state(NR_UNSTABLE_NFS),
Johannes Weinerd507e2eb2017-08-10 15:23:31 -07005118 global_node_page_state(NR_SLAB_RECLAIMABLE),
5119 global_node_page_state(NR_SLAB_UNRECLAIMABLE),
Mel Gorman50658e22016-07-28 15:46:14 -07005120 global_node_page_state(NR_FILE_MAPPED),
Mel Gorman11fb9982016-07-28 15:46:20 -07005121 global_node_page_state(NR_SHMEM),
Michal Hockoc41f0122017-09-06 16:23:36 -07005122 global_zone_page_state(NR_PAGETABLE),
5123 global_zone_page_state(NR_BOUNCE),
5124 global_zone_page_state(NR_FREE_PAGES),
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005125 free_pcp,
Michal Hockoc41f0122017-09-06 16:23:36 -07005126 global_zone_page_state(NR_FREE_CMA_PAGES));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005127
Mel Gorman599d0c92016-07-28 15:45:31 -07005128 for_each_online_pgdat(pgdat) {
Michal Hocko9af744d2017-02-22 15:46:16 -08005129 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
Michal Hockoc02e50b2017-02-22 15:46:07 -08005130 continue;
5131
Mel Gorman599d0c92016-07-28 15:45:31 -07005132 printk("Node %d"
5133 " active_anon:%lukB"
5134 " inactive_anon:%lukB"
5135 " active_file:%lukB"
5136 " inactive_file:%lukB"
5137 " unevictable:%lukB"
5138 " isolated(anon):%lukB"
5139 " isolated(file):%lukB"
Mel Gorman50658e22016-07-28 15:46:14 -07005140 " mapped:%lukB"
Mel Gorman11fb9982016-07-28 15:46:20 -07005141 " dirty:%lukB"
5142 " writeback:%lukB"
5143 " shmem:%lukB"
5144#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5145 " shmem_thp: %lukB"
5146 " shmem_pmdmapped: %lukB"
5147 " anon_thp: %lukB"
5148#endif
5149 " writeback_tmp:%lukB"
5150 " unstable:%lukB"
Mel Gorman599d0c92016-07-28 15:45:31 -07005151 " all_unreclaimable? %s"
5152 "\n",
5153 pgdat->node_id,
5154 K(node_page_state(pgdat, NR_ACTIVE_ANON)),
5155 K(node_page_state(pgdat, NR_INACTIVE_ANON)),
5156 K(node_page_state(pgdat, NR_ACTIVE_FILE)),
5157 K(node_page_state(pgdat, NR_INACTIVE_FILE)),
5158 K(node_page_state(pgdat, NR_UNEVICTABLE)),
5159 K(node_page_state(pgdat, NR_ISOLATED_ANON)),
5160 K(node_page_state(pgdat, NR_ISOLATED_FILE)),
Mel Gorman50658e22016-07-28 15:46:14 -07005161 K(node_page_state(pgdat, NR_FILE_MAPPED)),
Mel Gorman11fb9982016-07-28 15:46:20 -07005162 K(node_page_state(pgdat, NR_FILE_DIRTY)),
5163 K(node_page_state(pgdat, NR_WRITEBACK)),
Alexander Polakov1f06b812017-04-07 16:04:45 -07005164 K(node_page_state(pgdat, NR_SHMEM)),
Mel Gorman11fb9982016-07-28 15:46:20 -07005165#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5166 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
5167 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
5168 * HPAGE_PMD_NR),
5169 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
5170#endif
Mel Gorman11fb9982016-07-28 15:46:20 -07005171 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
5172 K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
Johannes Weinerc73322d2017-05-03 14:51:51 -07005173 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
5174 "yes" : "no");
Mel Gorman599d0c92016-07-28 15:45:31 -07005175 }
5176
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07005177 for_each_populated_zone(zone) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005178 int i;
5179
Michal Hocko9af744d2017-02-22 15:46:16 -08005180 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
David Rientjesddd588b2011-03-22 16:30:46 -07005181 continue;
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005182
5183 free_pcp = 0;
5184 for_each_online_cpu(cpu)
5185 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5186
Linus Torvalds1da177e2005-04-16 15:20:36 -07005187 show_node(zone);
Joe Perches1f84a182016-10-27 17:46:29 -07005188 printk(KERN_CONT
5189 "%s"
Linus Torvalds1da177e2005-04-16 15:20:36 -07005190 " free:%lukB"
5191 " min:%lukB"
5192 " low:%lukB"
5193 " high:%lukB"
Minchan Kim71c799f2016-07-28 15:47:26 -07005194 " active_anon:%lukB"
5195 " inactive_anon:%lukB"
5196 " active_file:%lukB"
5197 " inactive_file:%lukB"
5198 " unevictable:%lukB"
Mel Gorman5a1c84b2016-07-28 15:47:31 -07005199 " writepending:%lukB"
Linus Torvalds1da177e2005-04-16 15:20:36 -07005200 " present:%lukB"
Jiang Liu9feedc92012-12-12 13:52:12 -08005201 " managed:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07005202 " mlocked:%lukB"
KOSAKI Motohiroc6a7f572009-09-21 17:01:32 -07005203 " kernel_stack:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07005204 " pagetables:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07005205 " bounce:%lukB"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005206 " free_pcp:%lukB"
5207 " local_pcp:%ukB"
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07005208 " free_cma:%lukB"
Linus Torvalds1da177e2005-04-16 15:20:36 -07005209 "\n",
5210 zone->name,
Mel Gorman88f5acf2011-01-13 15:45:41 -08005211 K(zone_page_state(zone, NR_FREE_PAGES)),
Mel Gorman41858962009-06-16 15:32:12 -07005212 K(min_wmark_pages(zone)),
5213 K(low_wmark_pages(zone)),
5214 K(high_wmark_pages(zone)),
Minchan Kim71c799f2016-07-28 15:47:26 -07005215 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
5216 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
5217 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
5218 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
5219 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
Mel Gorman5a1c84b2016-07-28 15:47:31 -07005220 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
Linus Torvalds1da177e2005-04-16 15:20:36 -07005221 K(zone->present_pages),
Arun KS9705bea2018-12-28 00:34:24 -08005222 K(zone_managed_pages(zone)),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07005223 K(zone_page_state(zone, NR_MLOCK)),
Andy Lutomirskid30dd8b2016-07-28 15:48:14 -07005224 zone_page_state(zone, NR_KERNEL_STACK_KB),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07005225 K(zone_page_state(zone, NR_PAGETABLE)),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07005226 K(zone_page_state(zone, NR_BOUNCE)),
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07005227 K(free_pcp),
5228 K(this_cpu_read(zone->pageset->pcp.count)),
Minchan Kim33e077b2016-07-28 15:47:14 -07005229 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005230 printk("lowmem_reserve[]:");
5231 for (i = 0; i < MAX_NR_ZONES; i++)
Joe Perches1f84a182016-10-27 17:46:29 -07005232 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
5233 printk(KERN_CONT "\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234 }
5235
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07005236 for_each_populated_zone(zone) {
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08005237 unsigned int order;
5238 unsigned long nr[MAX_ORDER], flags, total = 0;
Rabin Vincent377e4f12012-12-11 16:00:24 -08005239 unsigned char types[MAX_ORDER];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005240
Michal Hocko9af744d2017-02-22 15:46:16 -08005241 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
David Rientjesddd588b2011-03-22 16:30:46 -07005242 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005243 show_node(zone);
Joe Perches1f84a182016-10-27 17:46:29 -07005244 printk(KERN_CONT "%s: ", zone->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005245
5246 spin_lock_irqsave(&zone->lock, flags);
5247 for (order = 0; order < MAX_ORDER; order++) {
Rabin Vincent377e4f12012-12-11 16:00:24 -08005248 struct free_area *area = &zone->free_area[order];
5249 int type;
5250
5251 nr[order] = area->nr_free;
Kirill Korotaev8f9de512006-06-23 02:03:50 -07005252 total += nr[order] << order;
Rabin Vincent377e4f12012-12-11 16:00:24 -08005253
5254 types[order] = 0;
5255 for (type = 0; type < MIGRATE_TYPES; type++) {
5256 if (!list_empty(&area->free_list[type]))
5257 types[order] |= 1 << type;
5258 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005259 }
5260 spin_unlock_irqrestore(&zone->lock, flags);
Rabin Vincent377e4f12012-12-11 16:00:24 -08005261 for (order = 0; order < MAX_ORDER; order++) {
Joe Perches1f84a182016-10-27 17:46:29 -07005262 printk(KERN_CONT "%lu*%lukB ",
5263 nr[order], K(1UL) << order);
Rabin Vincent377e4f12012-12-11 16:00:24 -08005264 if (nr[order])
5265 show_migration_types(types[order]);
5266 }
Joe Perches1f84a182016-10-27 17:46:29 -07005267 printk(KERN_CONT "= %lukB\n", K(total));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005268 }
5269
David Rientjes949f7ec2013-04-29 15:07:48 -07005270 hugetlb_show_meminfo();
5271
Mel Gorman11fb9982016-07-28 15:46:20 -07005272 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
Larry Woodmane6f36022008-02-04 22:29:30 -08005273
Linus Torvalds1da177e2005-04-16 15:20:36 -07005274 show_swap_cache_info();
5275}
5276
Mel Gorman19770b32008-04-28 02:12:18 -07005277static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5278{
5279 zoneref->zone = zone;
5280 zoneref->zone_idx = zone_idx(zone);
5281}
5282
Linus Torvalds1da177e2005-04-16 15:20:36 -07005283/*
5284 * Builds allocation fallback zone lists.
Christoph Lameter1a932052006-01-06 00:11:16 -08005285 *
5286 * Add all populated zones of a node to the zonelist.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005287 */
Michal Hocko9d3be212017-09-06 16:20:30 -07005288static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005289{
Christoph Lameter1a932052006-01-06 00:11:16 -08005290 struct zone *zone;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07005291 enum zone_type zone_type = MAX_NR_ZONES;
Michal Hocko9d3be212017-09-06 16:20:30 -07005292 int nr_zones = 0;
Christoph Lameter02a68a52006-01-06 00:11:18 -08005293
5294 do {
Christoph Lameter2f6726e2006-09-25 23:31:18 -07005295 zone_type--;
Christoph Lameter070f8032006-01-06 00:11:19 -08005296 zone = pgdat->node_zones + zone_type;
Mel Gorman6aa303d2016-09-01 16:14:55 -07005297 if (managed_zone(zone)) {
Michal Hocko9d3be212017-09-06 16:20:30 -07005298 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
Christoph Lameter070f8032006-01-06 00:11:19 -08005299 check_highest_zone(zone_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005300 }
Christoph Lameter2f6726e2006-09-25 23:31:18 -07005301 } while (zone_type);
Zhang Yanfeibc732f12013-07-08 16:00:06 -07005302
Christoph Lameter070f8032006-01-06 00:11:19 -08005303 return nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005304}
5305
5306#ifdef CONFIG_NUMA
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005307
5308static int __parse_numa_zonelist_order(char *s)
5309{
Michal Hockoc9bff3e2017-09-06 16:20:13 -07005310 /*
5311	 * We used to support different zonelist modes but they turned
5312	 * out to be just not useful. Let's keep the warning in place
5313	 * if somebody still uses the command line parameter so that we do
5314	 * not fail it silently.
5315 */
5316 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5317 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005318 return -EINVAL;
5319 }
5320 return 0;
5321}
5322
5323static __init int setup_numa_zonelist_order(char *s)
5324{
Volodymyr G. Lukiianykecb256f2011-01-13 15:46:26 -08005325 if (!s)
5326 return 0;
5327
Michal Hockoc9bff3e2017-09-06 16:20:13 -07005328 return __parse_numa_zonelist_order(s);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005329}
5330early_param("numa_zonelist_order", setup_numa_zonelist_order);
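/*
 * For example, booting with "numa_zonelist_order=Node" is still accepted
 * (only the leading 'n'/'N' or 'd'/'D' is checked); any other value is
 * rejected with the warning in __parse_numa_zonelist_order().
 */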
5331
Michal Hockoc9bff3e2017-09-06 16:20:13 -07005332char numa_zonelist_order[] = "Node";
5333
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005334/*
5335 * sysctl handler for numa_zonelist_order
5336 */
Joe Perchescccad5b2014-06-06 14:38:09 -07005337int numa_zonelist_order_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07005338 void __user *buffer, size_t *length,
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005339 loff_t *ppos)
5340{
Michal Hockoc9bff3e2017-09-06 16:20:13 -07005341 char *str;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005342 int ret;
5343
Michal Hockoc9bff3e2017-09-06 16:20:13 -07005344 if (!write)
5345 return proc_dostring(table, write, buffer, length, ppos);
5346 str = memdup_user_nul(buffer, 16);
5347 if (IS_ERR(str))
5348 return PTR_ERR(str);
Chen Gangdacbde02013-07-03 15:02:35 -07005349
Michal Hockoc9bff3e2017-09-06 16:20:13 -07005350 ret = __parse_numa_zonelist_order(str);
5351 kfree(str);
Andi Kleen443c6f12009-12-23 21:00:47 +01005352 return ret;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005353}
5354
5355
Christoph Lameter62bc62a2009-06-16 15:32:15 -07005356#define MAX_NODE_LOAD (nr_online_nodes)
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005357static int node_load[MAX_NUMNODES];
5358
Linus Torvalds1da177e2005-04-16 15:20:36 -07005359/**
Pavel Pisa4dc3b162005-05-01 08:59:25 -07005360 * find_next_best_node - find the next node that should appear in a given node's fallback list
Linus Torvalds1da177e2005-04-16 15:20:36 -07005361 * @node: node whose fallback list we're appending
5362 * @used_node_mask: nodemask_t of already used nodes
5363 *
5364 * We use a number of factors to determine which is the next node that should
5365 * appear on a given node's fallback list. The node should not have appeared
5366 * already in @node's fallback list, and it should be the next closest node
5367 * according to the distance array (which contains arbitrary distance values
5368 * from each node to each node in the system), and should also prefer nodes
5369 * with no CPUs, since presumably they'll have very little allocation pressure
5370 * on them otherwise.
5371 * It returns -1 if no node is found.
5372 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005373static int find_next_best_node(int node, nodemask_t *used_node_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005374{
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01005375 int n, val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005376 int min_val = INT_MAX;
David Rientjes00ef2d22013-02-22 16:35:36 -08005377 int best_node = NUMA_NO_NODE;
Rusty Russella70f7302009-03-13 14:49:46 +10305378 const struct cpumask *tmp = cpumask_of_node(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005379
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01005380 /* Use the local node if we haven't already */
5381 if (!node_isset(node, *used_node_mask)) {
5382 node_set(node, *used_node_mask);
5383 return node;
5384 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005385
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005386 for_each_node_state(n, N_MEMORY) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005387
5388 /* Don't want a node to appear more than once */
5389 if (node_isset(n, *used_node_mask))
5390 continue;
5391
Linus Torvalds1da177e2005-04-16 15:20:36 -07005392 /* Use the distance array to find the distance */
5393 val = node_distance(node, n);
5394
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01005395 /* Penalize nodes under us ("prefer the next node") */
5396 val += (n < node);
5397
Linus Torvalds1da177e2005-04-16 15:20:36 -07005398 /* Give preference to headless and unused nodes */
Rusty Russella70f7302009-03-13 14:49:46 +10305399 tmp = cpumask_of_node(n);
5400 if (!cpumask_empty(tmp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005401 val += PENALTY_FOR_NODE_WITH_CPUS;
5402
5403 /* Slight preference for less loaded node */
5404 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
5405 val += node_load[n];
5406
5407 if (val < min_val) {
5408 min_val = val;
5409 best_node = n;
5410 }
5411 }
5412
5413 if (best_node >= 0)
5414 node_set(best_node, *used_node_mask);
5415
5416 return best_node;
5417}
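/*
 * Worked example (numbers are illustrative): building the fallback list for
 * node 0, with node_distance(0, 1) == 20 and node_distance(0, 2) == 30 and
 * both remote nodes having CPUs, the scores are roughly
 * (20 + PENALTY_FOR_NODE_WITH_CPUS) * MAX_NODE_LOAD * MAX_NUMNODES and
 * (30 + PENALTY_FOR_NODE_WITH_CPUS) * MAX_NODE_LOAD * MAX_NUMNODES, so the
 * closer node 1 is picked first; node_load[] only breaks ties between nodes
 * at the same distance.
 */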
5418
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005419
5420/*
5421 * Build zonelists ordered by node and zones within node.
5422 * This results in maximum locality--normal zone overflows into local
5423 * DMA zone, if any--but risks exhausting DMA zone.
5424 */
Michal Hocko9d3be212017-09-06 16:20:30 -07005425static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5426 unsigned nr_nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005427{
Michal Hocko9d3be212017-09-06 16:20:30 -07005428 struct zoneref *zonerefs;
5429 int i;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005430
Michal Hocko9d3be212017-09-06 16:20:30 -07005431 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5432
5433 for (i = 0; i < nr_nodes; i++) {
5434 int nr_zones;
5435
5436 pg_data_t *node = NODE_DATA(node_order[i]);
5437
5438 nr_zones = build_zonerefs_node(node, zonerefs);
5439 zonerefs += nr_zones;
5440 }
5441 zonerefs->zone = NULL;
5442 zonerefs->zone_idx = 0;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005443}
5444
5445/*
Christoph Lameter523b9452007-10-16 01:25:37 -07005446 * Build gfp_thisnode zonelists
5447 */
5448static void build_thisnode_zonelists(pg_data_t *pgdat)
5449{
Michal Hocko9d3be212017-09-06 16:20:30 -07005450 struct zoneref *zonerefs;
5451 int nr_zones;
Christoph Lameter523b9452007-10-16 01:25:37 -07005452
Michal Hocko9d3be212017-09-06 16:20:30 -07005453 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5454 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5455 zonerefs += nr_zones;
5456 zonerefs->zone = NULL;
5457 zonerefs->zone_idx = 0;
Christoph Lameter523b9452007-10-16 01:25:37 -07005458}
5459
5460/*
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005461 * Build zonelists ordered by zone and nodes within zones.
5462 * This results in conserving DMA zone[s] until all Normal memory is
5463 * exhausted, but results in overflowing to remote node while memory
5464 * may still exist in local DMA zone.
5465 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005466
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005467static void build_zonelists(pg_data_t *pgdat)
5468{
Michal Hocko9d3be212017-09-06 16:20:30 -07005469 static int node_order[MAX_NUMNODES];
5470 int node, load, nr_nodes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005471 nodemask_t used_mask;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005472 int local_node, prev_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005473
5474 /* NUMA-aware ordering of nodes */
5475 local_node = pgdat->node_id;
Christoph Lameter62bc62a2009-06-16 15:32:15 -07005476 load = nr_online_nodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005477 prev_node = local_node;
5478 nodes_clear(used_mask);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005479
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005480 memset(node_order, 0, sizeof(node_order));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005481 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5482 /*
5483 * We don't want to pressure a particular node.
5484 * So adding penalty to the first node in same
5485 * distance group to make it round-robin.
5486 */
David Rientjes957f8222012-10-08 16:33:24 -07005487 if (node_distance(local_node, node) !=
5488 node_distance(local_node, prev_node))
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005489 node_load[node] = load;
5490
Michal Hocko9d3be212017-09-06 16:20:30 -07005491 node_order[nr_nodes++] = node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005492 prev_node = node;
5493 load--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005494 }
Christoph Lameter523b9452007-10-16 01:25:37 -07005495
Michal Hocko9d3be212017-09-06 16:20:30 -07005496 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
Christoph Lameter523b9452007-10-16 01:25:37 -07005497 build_thisnode_zonelists(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005498}
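/*
 * Example (illustrative): on a two-node machine, node 0's ZONELIST_FALLBACK
 * ends up as node 0's zones (highest to lowest) followed by node 1's zones,
 * while the ZONELIST_NOFALLBACK list built by build_thisnode_zonelists()
 * contains only node 0's own zones.
 */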
5499
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07005500#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5501/*
5502 * Return node id of node used for "local" allocations.
5503 * I.e., first node id of first zone in arg node's generic zonelist.
5504 * Used for initializing percpu 'numa_mem', which is used primarily
5505 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5506 */
5507int local_memory_node(int node)
5508{
Mel Gormanc33d6c02016-05-19 17:14:10 -07005509 struct zoneref *z;
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07005510
Mel Gormanc33d6c02016-05-19 17:14:10 -07005511 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07005512 gfp_zone(GFP_KERNEL),
Mel Gormanc33d6c02016-05-19 17:14:10 -07005513 NULL);
Pavel Tatashinc1093b72018-08-21 21:53:32 -07005514 return zone_to_nid(z->zone);
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07005515}
5516#endif
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005517
Joonsoo Kim6423aa82016-08-10 16:27:49 -07005518static void setup_min_unmapped_ratio(void);
5519static void setup_min_slab_ratio(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005520#else /* CONFIG_NUMA */
5521
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005522static void build_zonelists(pg_data_t *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005523{
Christoph Lameter19655d32006-09-25 23:31:19 -07005524 int node, local_node;
Michal Hocko9d3be212017-09-06 16:20:30 -07005525 struct zoneref *zonerefs;
5526 int nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005527
5528 local_node = pgdat->node_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005529
Michal Hocko9d3be212017-09-06 16:20:30 -07005530 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5531 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5532 zonerefs += nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005533
Mel Gorman54a6eb52008-04-28 02:12:16 -07005534 /*
5535 * Now we build the zonelist so that it contains the zones
5536 * of all the other nodes.
5537 * We don't want to pressure a particular node, so when
5538 * building the zones for node N, we make sure that the
5539 * zones coming right after the local ones are those from
5540 * node N+1 (modulo N)
5541 */
5542 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5543 if (!node_online(node))
5544 continue;
Michal Hocko9d3be212017-09-06 16:20:30 -07005545 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5546 zonerefs += nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005547 }
Mel Gorman54a6eb52008-04-28 02:12:16 -07005548 for (node = 0; node < local_node; node++) {
5549 if (!node_online(node))
5550 continue;
Michal Hocko9d3be212017-09-06 16:20:30 -07005551 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5552 zonerefs += nr_zones;
Mel Gorman54a6eb52008-04-28 02:12:16 -07005553 }
5554
Michal Hocko9d3be212017-09-06 16:20:30 -07005555 zonerefs->zone = NULL;
5556 zonerefs->zone_idx = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005557}
5558
5559#endif /* CONFIG_NUMA */
5560
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005561/*
5562 * Boot pageset table. One per cpu which is going to be used for all
5563 * zones and all nodes. The parameters will be set in such a way
5564 * that an item put on a list will immediately be handed over to
5565 * the buddy list. This is safe since pageset manipulation is done
5566 * with interrupts disabled.
5567 *
5568 * The boot_pagesets must be kept even after bootup is complete for
5569 * unused processors and/or zones. They do play a role for bootstrapping
5570 * hotplugged processors.
5571 *
5572 * zoneinfo_show() and maybe other functions do
5573 * not check if the processor is online before following the pageset pointer.
5574 * Other parts of the kernel may not check if the zone is available.
5575 */
5576static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
5577static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
Johannes Weiner385386c2017-07-06 15:40:43 -07005578static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005579
Michal Hocko11cd8632017-09-06 16:20:34 -07005580static void __build_all_zonelists(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005581{
Yasunori Goto68113782006-06-23 02:03:11 -07005582 int nid;
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005583 int __maybe_unused cpu;
Jiang Liu9adb62a2012-07-31 16:43:28 -07005584 pg_data_t *self = data;
Michal Hockob93e0f32017-09-06 16:20:37 -07005585 static DEFINE_SPINLOCK(lock);
5586
5587 spin_lock(&lock);
Paul Jackson9276b1bc2006-12-06 20:31:48 -08005588
Bo Liu7f9cfb32009-08-18 14:11:19 -07005589#ifdef CONFIG_NUMA
5590 memset(node_load, 0, sizeof(node_load));
5591#endif
Jiang Liu9adb62a2012-07-31 16:43:28 -07005592
Wei Yangc1152582017-09-06 16:19:33 -07005593 /*
5594 * This node is hotadded and no memory is yet present. So just
5595 * building zonelists is fine - no need to touch other nodes.
5596 */
Jiang Liu9adb62a2012-07-31 16:43:28 -07005597 if (self && !node_online(self->node_id)) {
5598 build_zonelists(self);
Wei Yangc1152582017-09-06 16:19:33 -07005599 } else {
5600 for_each_online_node(nid) {
5601 pg_data_t *pgdat = NODE_DATA(nid);
Jiang Liu9adb62a2012-07-31 16:43:28 -07005602
Wei Yangc1152582017-09-06 16:19:33 -07005603 build_zonelists(pgdat);
5604 }
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005605
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005606#ifdef CONFIG_HAVE_MEMORYLESS_NODES
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005607 /*
5608 * We now know the "local memory node" for each node--
5609 * i.e., the node of the first zone in the generic zonelist.
5610 * Set up numa_mem percpu variable for on-line cpus. During
5611 * boot, only the boot cpu should be on-line; we'll init the
5612 * secondary cpus' numa_mem as they come on-line. During
5613 * node/memory hotplug, we'll fixup all on-line cpus.
5614 */
Michal Hockod9c9a0b2017-09-06 16:20:20 -07005615 for_each_online_cpu(cpu)
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005616 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005617#endif
Michal Hockod9c9a0b2017-09-06 16:20:20 -07005618 }
Michal Hockob93e0f32017-09-06 16:20:37 -07005619
5620 spin_unlock(&lock);
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005621}
5622
5623static noinline void __init
5624build_all_zonelists_init(void)
5625{
5626 int cpu;
5627
5628 __build_all_zonelists(NULL);
5629
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005630 /*
5631 * Initialize the boot_pagesets that are going to be used
5632 * for bootstrapping processors. The real pagesets for
5633 * each zone will be allocated later when the per cpu
5634 * allocator is available.
5635 *
5636 * boot_pagesets are used also for bootstrapping offline
5637 * cpus if the system is already booted because the pagesets
5638 * are needed to initialize allocators on a specific cpu too.
5639 * F.e. the percpu allocator needs the page allocator which
5640 * needs the percpu allocator in order to allocate its pagesets
5641 * (a chicken-egg dilemma).
5642 */
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005643 for_each_possible_cpu(cpu)
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005644 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
5645
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08005646 mminit_verify_zonelist();
5647 cpuset_init_current_mems_allowed();
5648}
5649
Haicheng Li4eaf3f62010-05-24 14:32:52 -07005650/*
Haicheng Li4eaf3f62010-05-24 14:32:52 -07005651 * The __init helper below is used only while system_state == SYSTEM_BOOTING.
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08005652 *
Michal Hocko72675e12017-09-06 16:20:24 -07005653 * __ref due to call of __init annotated helper build_all_zonelists_init
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08005654 * [protected by SYSTEM_BOOTING].
Haicheng Li4eaf3f62010-05-24 14:32:52 -07005655 */
Michal Hocko72675e12017-09-06 16:20:24 -07005656void __ref build_all_zonelists(pg_data_t *pgdat)
Yasunori Goto68113782006-06-23 02:03:11 -07005657{
5658 if (system_state == SYSTEM_BOOTING) {
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08005659 build_all_zonelists_init();
Yasunori Goto68113782006-06-23 02:03:11 -07005660 } else {
Michal Hocko11cd8632017-09-06 16:20:34 -07005661 __build_all_zonelists(pgdat);
Yasunori Goto68113782006-06-23 02:03:11 -07005662 /* cpuset refresh routine should be here */
5663 }
Andrew Mortonbd1e22b2006-06-23 02:03:47 -07005664 vm_total_pages = nr_free_pagecache_pages();
Mel Gorman9ef9acb2007-10-16 01:25:54 -07005665 /*
5666 * Disable grouping by mobility if the number of pages in the
5667 * system is too low to allow the mechanism to work. It would be
5668 * more accurate, but expensive to check per-zone. This check is
5669 * made on memory-hotadd so a system can start with mobility
5670 * disabled and enable it later
5671 */
Mel Gormand9c23402007-10-16 01:26:01 -07005672 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
Mel Gorman9ef9acb2007-10-16 01:25:54 -07005673 page_group_by_mobility_disabled = 1;
5674 else
5675 page_group_by_mobility_disabled = 0;
5676
Michal Hockoc9bff3e2017-09-06 16:20:13 -07005677 pr_info("Built %i zonelists, mobility grouping %s. Total pages: %ld\n",
Joe Perches756a0252016-03-17 14:19:47 -07005678 nr_online_nodes,
Joe Perches756a0252016-03-17 14:19:47 -07005679 page_group_by_mobility_disabled ? "off" : "on",
5680 vm_total_pages);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005681#ifdef CONFIG_NUMA
Anton Blanchardf88dfff2014-12-10 15:42:53 -08005682 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005683#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005684}
5685
Pavel Tatashina9a9e772018-10-26 15:09:40 -07005686/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
5687static bool __meminit
5688overlap_memmap_init(unsigned long zone, unsigned long *pfn)
5689{
5690#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5691 static struct memblock_region *r;
5692
5693 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5694 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
5695 for_each_memblock(memory, r) {
5696 if (*pfn < memblock_region_memory_end_pfn(r))
5697 break;
5698 }
5699 }
5700 if (*pfn >= memblock_region_memory_base_pfn(r) &&
5701 memblock_is_mirror(r)) {
5702 *pfn = memblock_region_memory_end_pfn(r);
5703 return true;
5704 }
5705 }
5706#endif
5707 return false;
5708}
5709
Linus Torvalds1da177e2005-04-16 15:20:36 -07005710/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07005711 * Initially all pages are reserved - free ones are freed
Mike Rapoportc6ffc5c2018-10-30 15:09:30 -07005712 * up by memblock_free_all() once the early boot process is
Linus Torvalds1da177e2005-04-16 15:20:36 -07005713 * done. Non-atomic initialization, single-pass.
5714 */
Matt Tolentinoc09b4242006-01-17 07:03:44 +01005715void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
Christoph Hellwiga99583e2017-12-29 08:53:57 +01005716 unsigned long start_pfn, enum memmap_context context,
5717 struct vmem_altmap *altmap)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005718{
Pavel Tatashina9a9e772018-10-26 15:09:40 -07005719 unsigned long pfn, end_pfn = start_pfn + size;
Pavel Tatashind0dc12e2018-04-05 16:23:00 -07005720 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005721
Hugh Dickins22b31ee2009-01-06 14:40:09 -08005722 if (highest_memmap_pfn < end_pfn - 1)
5723 highest_memmap_pfn = end_pfn - 1;
5724
Alexander Duyck966cf442018-10-26 15:07:52 -07005725#ifdef CONFIG_ZONE_DEVICE
Dan Williams4b94ffd2016-01-15 16:56:22 -08005726 /*
5727 * Honor reservation requested by the driver for this ZONE_DEVICE
Alexander Duyck966cf442018-10-26 15:07:52 -07005728 * memory. We limit the total number of pages to initialize to just
5729 * those that might contain the memory mapping. We will defer the
5730 * ZONE_DEVICE page initialization until after we have released
5731 * the hotplug lock.
Dan Williams4b94ffd2016-01-15 16:56:22 -08005732 */
Alexander Duyck966cf442018-10-26 15:07:52 -07005733 if (zone == ZONE_DEVICE) {
5734 if (!altmap)
5735 return;
5736
5737 if (start_pfn == altmap->base_pfn)
5738 start_pfn += altmap->reserve;
5739 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
5740 }
5741#endif
Dan Williams4b94ffd2016-01-15 16:56:22 -08005742
Greg Ungerercbe8dd42006-01-12 01:05:24 -08005743 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
Dave Hansena2f3aa022007-01-10 23:15:30 -08005744 /*
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005745 * There can be holes in boot-time mem_map[]s handed to this
5746 * function. They do not exist on hotplugged memory.
Dave Hansena2f3aa022007-01-10 23:15:30 -08005747 */
Pavel Tatashina9a9e772018-10-26 15:09:40 -07005748 if (context == MEMMAP_EARLY) {
5749 if (!early_pfn_valid(pfn))
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005750 continue;
Pavel Tatashina9a9e772018-10-26 15:09:40 -07005751 if (!early_pfn_in_nid(pfn, nid))
5752 continue;
5753 if (overlap_memmap_init(zone, &pfn))
5754 continue;
5755 if (defer_init(nid, pfn, end_pfn))
5756 break;
Dave Hansena2f3aa022007-01-10 23:15:30 -08005757 }
Mel Gormanac5d2532015-06-30 14:57:20 -07005758
Pavel Tatashind0dc12e2018-04-05 16:23:00 -07005759 page = pfn_to_page(pfn);
5760 __init_single_page(page, pfn, zone, nid);
5761 if (context == MEMMAP_HOTPLUG)
Alexander Duyckd483da52018-10-26 15:07:48 -07005762 __SetPageReserved(page);
Pavel Tatashind0dc12e2018-04-05 16:23:00 -07005763
Mel Gormanac5d2532015-06-30 14:57:20 -07005764 /*
5765 * Mark the block movable so that blocks are reserved for
5766 * movable at startup. This will force kernel allocations
5767 * to reserve their blocks rather than leaking throughout
5768 * the address space during boot when many long-lived
Mel Gorman974a7862015-11-06 16:28:34 -08005769 * kernel allocations are made.
Mel Gormanac5d2532015-06-30 14:57:20 -07005770 *
5771		 * The pageblock bitmap is created for the zone's valid pfn range,
5772		 * but the memmap can also cover invalid pages (for alignment), so
5773		 * check here that set_pageblock_migratetype() is not called against
5774		 * a pfn outside the zone.
5775 */
5776 if (!(pfn & (pageblock_nr_pages - 1))) {
Mel Gormanac5d2532015-06-30 14:57:20 -07005777 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
Michal Hocko9b6e63c2017-10-03 16:16:19 -07005778 cond_resched();
Mel Gormanac5d2532015-06-30 14:57:20 -07005779 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005780 }
5781}
5782
Alexander Duyck966cf442018-10-26 15:07:52 -07005783#ifdef CONFIG_ZONE_DEVICE
5784void __ref memmap_init_zone_device(struct zone *zone,
5785 unsigned long start_pfn,
5786 unsigned long size,
5787 struct dev_pagemap *pgmap)
5788{
5789 unsigned long pfn, end_pfn = start_pfn + size;
5790 struct pglist_data *pgdat = zone->zone_pgdat;
5791 unsigned long zone_idx = zone_idx(zone);
5792 unsigned long start = jiffies;
5793 int nid = pgdat->node_id;
5794
5795 if (WARN_ON_ONCE(!pgmap || !is_dev_zone(zone)))
5796 return;
5797
5798 /*
5799 * The call to memmap_init_zone should have already taken care
5800 * of the pages reserved for the memmap, so we can just jump to
5801 * the end of that region and start processing the device pages.
5802 */
5803 if (pgmap->altmap_valid) {
5804 struct vmem_altmap *altmap = &pgmap->altmap;
5805
5806 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
5807 size = end_pfn - start_pfn;
5808 }
5809
5810 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
5811 struct page *page = pfn_to_page(pfn);
5812
5813 __init_single_page(page, pfn, zone_idx, nid);
5814
5815 /*
5816 * Mark page reserved as it will need to wait for onlining
5817 * phase for it to be fully associated with a zone.
5818 *
5819 * We can use the non-atomic __set_bit operation for setting
5820 * the flag as we are still initializing the pages.
5821 */
5822 __SetPageReserved(page);
5823
5824 /*
5825 * ZONE_DEVICE pages union ->lru with a ->pgmap back
5826 * pointer and hmm_data. It is a bug if a ZONE_DEVICE
5827 * page is ever freed or placed on a driver-private list.
5828 */
5829 page->pgmap = pgmap;
5830 page->hmm_data = 0;
5831
5832 /*
5833 * Mark the block movable so that blocks are reserved for
5834 * movable at startup. This will force kernel allocations
5835 * to reserve their blocks rather than leaking throughout
5836 * the address space during boot when many long-lived
5837 * kernel allocations are made.
5838 *
5839		 * The pageblock bitmap is created for the zone's valid pfn range,
5840		 * but the memmap can also cover invalid pages (for alignment), so
5841		 * check here that set_pageblock_migratetype() is not called against
5842		 * a pfn outside the zone.
5843 *
5844 * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
5845 * because this is done early in sparse_add_one_section
5846 */
5847 if (!(pfn & (pageblock_nr_pages - 1))) {
5848 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5849 cond_resched();
5850 }
5851 }
5852
5853 pr_info("%s initialised, %lu pages in %ums\n", dev_name(pgmap->dev),
5854 size, jiffies_to_msecs(jiffies - start));
5855}
5856
5857#endif
Andi Kleen1e548de2008-02-04 22:29:26 -08005858static void __meminit zone_init_free_lists(struct zone *zone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005859{
Mel Gorman7aeb09f2014-06-04 16:10:21 -07005860 unsigned int order, t;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07005861 for_each_migratetype_order(order, t) {
5862 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005863 zone->free_area[order].nr_free = 0;
5864 }
5865}
5866
Pavel Tatashindfb3ccd2018-10-26 15:09:32 -07005867void __meminit __weak memmap_init(unsigned long size, int nid,
5868 unsigned long zone, unsigned long start_pfn)
5869{
5870 memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, NULL);
5871}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005872
David Rientjes7cd2b0a2014-06-23 13:22:04 -07005873static int zone_batchsize(struct zone *zone)
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005874{
David Howells3a6be872009-05-06 16:03:03 -07005875#ifdef CONFIG_MMU
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005876 int batch;
5877
5878 /*
 5879	 * The per-cpu-pages pools are set to around 1/1000th of the
Aaron Lud8a759b2018-08-17 15:49:14 -07005880 * size of the zone.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005881 */
Arun KS9705bea2018-12-28 00:34:24 -08005882 batch = zone_managed_pages(zone) / 1024;
Aaron Lud8a759b2018-08-17 15:49:14 -07005883 /* But no more than a meg. */
5884 if (batch * PAGE_SIZE > 1024 * 1024)
5885 batch = (1024 * 1024) / PAGE_SIZE;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005886 batch /= 4; /* We effectively *= 4 below */
5887 if (batch < 1)
5888 batch = 1;
5889
5890 /*
Nick Piggin0ceaacc2005-12-04 13:55:25 +11005891 * Clamp the batch to a 2^n - 1 value. Having a power
5892 * of 2 value was found to be more likely to have
5893 * suboptimal cache aliasing properties in some cases.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005894 *
Nick Piggin0ceaacc2005-12-04 13:55:25 +11005895 * For example if 2 tasks are alternately allocating
5896 * batches of pages, one task can end up with a lot
5897 * of pages of one half of the possible page colors
5898 * and the other with pages of the other colors.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005899 */
David Howells91552032009-05-06 16:03:02 -07005900 batch = rounddown_pow_of_two(batch + batch/2) - 1;
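	/*
	 * Illustrative worked example (editor's addition, not in the
	 * original source; assumes 4 KiB pages and a fully managed zone):
	 * a 256 MiB zone has 65536 managed pages, so batch starts at
	 * 65536 / 1024 = 64; the 1 MiB cap does not trigger, batch /= 4
	 * gives 16, and rounddown_pow_of_two(16 + 16/2) - 1 = 15 is the
	 * value returned below.
	 */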
Seth, Rohitba56e912005-10-29 18:15:47 -07005901
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005902 return batch;
David Howells3a6be872009-05-06 16:03:03 -07005903
5904#else
5905 /* The deferral and batching of frees should be suppressed under NOMMU
5906 * conditions.
5907 *
5908 * The problem is that NOMMU needs to be able to allocate large chunks
5909 * of contiguous memory as there's no hardware page translation to
5910 * assemble apparent contiguous memory from discontiguous pages.
5911 *
5912 * Queueing large contiguous runs of pages for batching, however,
5913 * causes the pages to actually be freed in smaller chunks. As there
5914 * can be a significant delay between the individual batches being
5915 * recycled, this leads to the once large chunks of space being
5916 * fragmented and becoming unavailable for high-order allocations.
5917 */
5918 return 0;
5919#endif
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005920}
5921
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005922/*
5923 * pcp->high and pcp->batch values are related and dependent on one another:
 5924 * ->batch must never be higher than ->high.
5925 * The following function updates them in a safe manner without read side
5926 * locking.
5927 *
5928 * Any new users of pcp->batch and pcp->high should ensure they can cope with
 5929 * those fields changing asynchronously (according to the above rule).
5930 *
5931 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5932 * outside of boot time (or some other assurance that no concurrent updaters
5933 * exist).
5934 */
5935static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5936 unsigned long batch)
5937{
5938 /* start with a fail safe value for batch */
5939 pcp->batch = 1;
5940 smp_wmb();
5941
5942 /* Update high, then batch, in order */
5943 pcp->high = high;
5944 smp_wmb();
5945
5946 pcp->batch = batch;
5947}
5948
Cody P Schafer36640332013-07-03 15:01:40 -07005949/* a companion to pageset_set_high() */
Cody P Schafer4008bab2013-07-03 15:01:28 -07005950static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
5951{
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005952 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
Cody P Schafer4008bab2013-07-03 15:01:28 -07005953}
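/*
 * Illustrative note (editor's addition, not in the original source):
 * with a batch of 15, as in the zone_batchsize() example above,
 * pageset_set_batch() leaves the pcp with high = 6 * 15 = 90 and
 * batch = max(1UL, 15) = 15.
 */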
5954
Cody P Schafer88c90db2013-07-03 15:01:35 -07005955static void pageset_init(struct per_cpu_pageset *p)
Christoph Lameter2caaad42005-06-21 17:15:00 -07005956{
5957 struct per_cpu_pages *pcp;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07005958 int migratetype;
Christoph Lameter2caaad42005-06-21 17:15:00 -07005959
Magnus Damm1c6fe942005-10-26 01:58:59 -07005960 memset(p, 0, sizeof(*p));
5961
Christoph Lameter3dfa5722008-02-04 22:29:19 -08005962 pcp = &p->pcp;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07005963 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
5964 INIT_LIST_HEAD(&pcp->lists[migratetype]);
Christoph Lameter2caaad42005-06-21 17:15:00 -07005965}
5966
Cody P Schafer88c90db2013-07-03 15:01:35 -07005967static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
5968{
5969 pageset_init(p);
5970 pageset_set_batch(p, batch);
5971}
5972
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005973/*
Cody P Schafer36640332013-07-03 15:01:40 -07005974 * pageset_set_high() sets the high-water mark of the hot per_cpu_pagelist
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005975 * to the value high for the pageset p.
5976 */
Cody P Schafer36640332013-07-03 15:01:40 -07005977static void pageset_set_high(struct per_cpu_pageset *p,
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005978 unsigned long high)
5979{
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005980 unsigned long batch = max(1UL, high / 4);
5981 if ((high / 4) > (PAGE_SHIFT * 8))
5982 batch = PAGE_SHIFT * 8;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005983
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005984 pageset_update(&p->pcp, high, batch);
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005985}
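/*
 * Illustrative worked example (editor's addition, not in the original
 * source; assumes 4 KiB pages, i.e. PAGE_SHIFT = 12): with
 * percpu_pagelist_fraction = 8 on a zone managing 262144 pages,
 * pageset_set_high_and_batch() below calls pageset_set_high() with
 * high = 32768; high / 4 = 8192 exceeds PAGE_SHIFT * 8 = 96, so the
 * pageset ends up with high = 32768 and batch = 96.
 */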
5986
David Rientjes7cd2b0a2014-06-23 13:22:04 -07005987static void pageset_set_high_and_batch(struct zone *zone,
5988 struct per_cpu_pageset *pcp)
Cody P Schafer56cef2b2013-07-03 15:01:38 -07005989{
Cody P Schafer56cef2b2013-07-03 15:01:38 -07005990 if (percpu_pagelist_fraction)
Cody P Schafer36640332013-07-03 15:01:40 -07005991 pageset_set_high(pcp,
Arun KS9705bea2018-12-28 00:34:24 -08005992 (zone_managed_pages(zone) /
Cody P Schafer56cef2b2013-07-03 15:01:38 -07005993 percpu_pagelist_fraction));
5994 else
5995 pageset_set_batch(pcp, zone_batchsize(zone));
5996}
5997
Cody P Schafer169f6c12013-07-03 15:01:41 -07005998static void __meminit zone_pageset_init(struct zone *zone, int cpu)
5999{
6000 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
6001
6002 pageset_init(pcp);
6003 pageset_set_high_and_batch(zone, pcp);
6004}
6005
Michal Hocko72675e12017-09-06 16:20:24 -07006006void __meminit setup_zone_pageset(struct zone *zone)
Wu Fengguang319774e2010-05-24 14:32:49 -07006007{
6008 int cpu;
Wu Fengguang319774e2010-05-24 14:32:49 -07006009 zone->pageset = alloc_percpu(struct per_cpu_pageset);
Cody P Schafer56cef2b2013-07-03 15:01:38 -07006010 for_each_possible_cpu(cpu)
6011 zone_pageset_init(zone, cpu);
Wu Fengguang319774e2010-05-24 14:32:49 -07006012}
6013
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006014/*
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09006015 * Allocate per cpu pagesets and initialize them.
6016 * Before this call only boot pagesets were available.
Christoph Lameter2caaad42005-06-21 17:15:00 -07006017 */
Al Viro78d99552005-12-15 09:18:25 +00006018void __init setup_per_cpu_pageset(void)
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006019{
Mel Gormanb4911ea2016-08-04 15:31:49 -07006020 struct pglist_data *pgdat;
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09006021 struct zone *zone;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006022
Wu Fengguang319774e2010-05-24 14:32:49 -07006023 for_each_populated_zone(zone)
6024 setup_zone_pageset(zone);
Mel Gormanb4911ea2016-08-04 15:31:49 -07006025
6026 for_each_online_pgdat(pgdat)
6027 pgdat->per_cpu_nodestats =
6028 alloc_percpu(struct per_cpu_nodestat);
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07006029}
6030
Matt Tolentinoc09b4242006-01-17 07:03:44 +01006031static __meminit void zone_pcp_init(struct zone *zone)
Dave Hansened8ece22005-10-29 18:16:50 -07006032{
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09006033 /*
6034 * per cpu subsystem is not up at this point. The following code
6035 * relies on the ability of the linker to provide the
6036 * offset of a (static) per cpu variable into the per cpu area.
6037 */
6038 zone->pageset = &boot_pageset;
Dave Hansened8ece22005-10-29 18:16:50 -07006039
Xishi Qiub38a8722013-11-12 15:07:20 -08006040 if (populated_zone(zone))
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09006041 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
6042 zone->name, zone->present_pages,
6043 zone_batchsize(zone));
Dave Hansened8ece22005-10-29 18:16:50 -07006044}
6045
Michal Hockodc0bbf32017-07-06 15:37:35 -07006046void __meminit init_currently_empty_zone(struct zone *zone,
Yasunori Goto718127c2006-06-23 02:03:10 -07006047 unsigned long zone_start_pfn,
Yaowei Baib171e402015-11-05 18:47:06 -08006048 unsigned long size)
Dave Hansened8ece22005-10-29 18:16:50 -07006049{
6050 struct pglist_data *pgdat = zone->zone_pgdat;
Wei Yang8f416832018-11-30 14:09:07 -08006051 int zone_idx = zone_idx(zone) + 1;
Linus Torvalds9dcb8b62016-10-26 10:15:30 -07006052
Wei Yang8f416832018-11-30 14:09:07 -08006053 if (zone_idx > pgdat->nr_zones)
6054 pgdat->nr_zones = zone_idx;
Dave Hansened8ece22005-10-29 18:16:50 -07006055
Dave Hansened8ece22005-10-29 18:16:50 -07006056 zone->zone_start_pfn = zone_start_pfn;
6057
Mel Gorman708614e2008-07-23 21:26:51 -07006058 mminit_dprintk(MMINIT_TRACE, "memmap_init",
6059 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
6060 pgdat->node_id,
6061 (unsigned long)zone_idx(zone),
6062 zone_start_pfn, (zone_start_pfn + size));
6063
Andi Kleen1e548de2008-02-04 22:29:26 -08006064 zone_init_free_lists(zone);
Linus Torvalds9dcb8b62016-10-26 10:15:30 -07006065 zone->initialized = 1;
Dave Hansened8ece22005-10-29 18:16:50 -07006066}
6067
Tejun Heo0ee332c2011-12-08 10:22:09 -08006068#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Mel Gormanc7132162006-09-27 01:49:43 -07006069#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
Mel Gorman8a942fd2015-06-30 14:56:55 -07006070
Mel Gormanc7132162006-09-27 01:49:43 -07006071/*
6072 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
Mel Gormanc7132162006-09-27 01:49:43 -07006073 */
Mel Gorman8a942fd2015-06-30 14:56:55 -07006074int __meminit __early_pfn_to_nid(unsigned long pfn,
6075 struct mminit_pfnnid_cache *state)
Mel Gormanc7132162006-09-27 01:49:43 -07006076{
Tejun Heoc13291a2011-07-12 10:46:30 +02006077 unsigned long start_pfn, end_pfn;
Yinghai Lue76b63f2013-09-11 14:22:17 -07006078 int nid;
Russ Anderson7c243c72013-04-29 15:07:59 -07006079
Mel Gorman8a942fd2015-06-30 14:56:55 -07006080 if (state->last_start <= pfn && pfn < state->last_end)
6081 return state->last_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07006082
Yinghai Lue76b63f2013-09-11 14:22:17 -07006083 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08006084 if (nid != NUMA_NO_NODE) {
Mel Gorman8a942fd2015-06-30 14:56:55 -07006085 state->last_start = start_pfn;
6086 state->last_end = end_pfn;
6087 state->last_nid = nid;
Yinghai Lue76b63f2013-09-11 14:22:17 -07006088 }
6089
6090 return nid;
Mel Gormanc7132162006-09-27 01:49:43 -07006091}
6092#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
6093
Mel Gormanc7132162006-09-27 01:49:43 -07006094/**
Santosh Shilimkar67828322014-01-21 15:50:25 -08006095 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006096 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
Santosh Shilimkar67828322014-01-21 15:50:25 -08006097 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
Mel Gormanc7132162006-09-27 01:49:43 -07006098 *
Zhang Zhen7d018172014-06-04 16:10:53 -07006099 * If an architecture guarantees that all ranges registered contain no holes
 6100 * and may be freed, this function may be used instead of calling
6101 * memblock_free_early_nid() manually.
Mel Gormanc7132162006-09-27 01:49:43 -07006102 */
Tejun Heoc13291a2011-07-12 10:46:30 +02006103void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07006104{
Tejun Heoc13291a2011-07-12 10:46:30 +02006105 unsigned long start_pfn, end_pfn;
6106 int i, this_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07006107
Tejun Heoc13291a2011-07-12 10:46:30 +02006108 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
6109 start_pfn = min(start_pfn, max_low_pfn);
6110 end_pfn = min(end_pfn, max_low_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07006111
Tejun Heoc13291a2011-07-12 10:46:30 +02006112 if (start_pfn < end_pfn)
Santosh Shilimkar67828322014-01-21 15:50:25 -08006113 memblock_free_early_nid(PFN_PHYS(start_pfn),
6114 (end_pfn - start_pfn) << PAGE_SHIFT,
6115 this_nid);
Mel Gormanc7132162006-09-27 01:49:43 -07006116 }
6117}
6118
6119/**
6120 * sparse_memory_present_with_active_regions - Call memory_present for each active range
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006121 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
Mel Gormanc7132162006-09-27 01:49:43 -07006122 *
Zhang Zhen7d018172014-06-04 16:10:53 -07006123 * If an architecture guarantees that all ranges registered contain no holes and may
6124 * be freed, this function may be used instead of calling memory_present() manually.
Mel Gormanc7132162006-09-27 01:49:43 -07006125 */
6126void __init sparse_memory_present_with_active_regions(int nid)
6127{
Tejun Heoc13291a2011-07-12 10:46:30 +02006128 unsigned long start_pfn, end_pfn;
6129 int i, this_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07006130
Tejun Heoc13291a2011-07-12 10:46:30 +02006131 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
6132 memory_present(this_nid, start_pfn, end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07006133}
6134
6135/**
6136 * get_pfn_range_for_nid - Return the start and end page frames for a node
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006137 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
6138 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
6139 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
Mel Gormanc7132162006-09-27 01:49:43 -07006140 *
6141 * It returns the start and end page frame of a node based on information
Zhang Zhen7d018172014-06-04 16:10:53 -07006142 * provided by memblock_set_node(). If called for a node
Mel Gormanc7132162006-09-27 01:49:43 -07006143 * with no available memory, a warning is printed and the start and end
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006144 * PFNs will be 0.
Mel Gormanc7132162006-09-27 01:49:43 -07006145 */
Oscar Salvadorbbe5d992018-12-28 00:37:24 -08006146void __init get_pfn_range_for_nid(unsigned int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07006147 unsigned long *start_pfn, unsigned long *end_pfn)
6148{
Tejun Heoc13291a2011-07-12 10:46:30 +02006149 unsigned long this_start_pfn, this_end_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07006150 int i;
Tejun Heoc13291a2011-07-12 10:46:30 +02006151
Mel Gormanc7132162006-09-27 01:49:43 -07006152 *start_pfn = -1UL;
6153 *end_pfn = 0;
6154
Tejun Heoc13291a2011-07-12 10:46:30 +02006155 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
6156 *start_pfn = min(*start_pfn, this_start_pfn);
6157 *end_pfn = max(*end_pfn, this_end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07006158 }
6159
Christoph Lameter633c0662007-10-16 01:25:37 -07006160 if (*start_pfn == -1UL)
Mel Gormanc7132162006-09-27 01:49:43 -07006161 *start_pfn = 0;
Mel Gormanc7132162006-09-27 01:49:43 -07006162}
6163
6164/*
Mel Gorman2a1e2742007-07-17 04:03:12 -07006165 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 6166 * assumption is made that zones within a node are ordered by monotonically
 6167 * increasing memory addresses so that the "highest" populated zone is used.
6168 */
Adrian Bunkb69a7282008-07-23 21:28:12 -07006169static void __init find_usable_zone_for_movable(void)
Mel Gorman2a1e2742007-07-17 04:03:12 -07006170{
6171 int zone_index;
6172 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
6173 if (zone_index == ZONE_MOVABLE)
6174 continue;
6175
6176 if (arch_zone_highest_possible_pfn[zone_index] >
6177 arch_zone_lowest_possible_pfn[zone_index])
6178 break;
6179 }
6180
6181 VM_BUG_ON(zone_index == -1);
6182 movable_zone = zone_index;
6183}
6184
6185/*
6186 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006187 * because it is sized independently of the architecture. Unlike the other zones,
Mel Gorman2a1e2742007-07-17 04:03:12 -07006188 * the starting point for ZONE_MOVABLE is not fixed. It may be different
6189 * in each node depending on the size of each node and how evenly kernelcore
6190 * is distributed. This helper function adjusts the zone ranges
6191 * provided by the architecture for a given node by using the end of the
6192 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 6193 * zones within a node are ordered by monotonically increasing memory addresses.
6194 */
Oscar Salvadorbbe5d992018-12-28 00:37:24 -08006195static void __init adjust_zone_range_for_zone_movable(int nid,
Mel Gorman2a1e2742007-07-17 04:03:12 -07006196 unsigned long zone_type,
6197 unsigned long node_start_pfn,
6198 unsigned long node_end_pfn,
6199 unsigned long *zone_start_pfn,
6200 unsigned long *zone_end_pfn)
6201{
6202 /* Only adjust if ZONE_MOVABLE is on this node */
6203 if (zone_movable_pfn[nid]) {
6204 /* Size ZONE_MOVABLE */
6205 if (zone_type == ZONE_MOVABLE) {
6206 *zone_start_pfn = zone_movable_pfn[nid];
6207 *zone_end_pfn = min(node_end_pfn,
6208 arch_zone_highest_possible_pfn[movable_zone]);
6209
Xishi Qiue506b992016-10-07 16:58:06 -07006210 /* Adjust for ZONE_MOVABLE starting within this range */
6211 } else if (!mirrored_kernelcore &&
6212 *zone_start_pfn < zone_movable_pfn[nid] &&
6213 *zone_end_pfn > zone_movable_pfn[nid]) {
6214 *zone_end_pfn = zone_movable_pfn[nid];
6215
Mel Gorman2a1e2742007-07-17 04:03:12 -07006216 /* Check if this whole range is within ZONE_MOVABLE */
6217 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
6218 *zone_start_pfn = *zone_end_pfn;
6219 }
6220}
6221
6222/*
Mel Gormanc7132162006-09-27 01:49:43 -07006223 * Return the number of pages a zone spans in a node, including holes
6224 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
6225 */
Oscar Salvadorbbe5d992018-12-28 00:37:24 -08006226static unsigned long __init zone_spanned_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07006227 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006228 unsigned long node_start_pfn,
6229 unsigned long node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07006230 unsigned long *zone_start_pfn,
6231 unsigned long *zone_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07006232 unsigned long *ignored)
6233{
Xishi Qiub5685e92015-09-08 15:04:16 -07006234 /* When hotadding a new node from cpu_up(), the node should be empty */
Xishi Qiuf9126ab2015-08-14 15:35:16 -07006235 if (!node_start_pfn && !node_end_pfn)
6236 return 0;
6237
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006238 /* Get the start and end of the zone */
Taku Izumid91749c2016-03-15 14:55:18 -07006239 *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
6240 *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
Mel Gorman2a1e2742007-07-17 04:03:12 -07006241 adjust_zone_range_for_zone_movable(nid, zone_type,
6242 node_start_pfn, node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07006243 zone_start_pfn, zone_end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07006244
6245 /* Check that this node has pages within the zone's required range */
Taku Izumid91749c2016-03-15 14:55:18 -07006246 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07006247 return 0;
6248
6249 /* Move the zone boundaries inside the node if necessary */
Taku Izumid91749c2016-03-15 14:55:18 -07006250 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
6251 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07006252
6253 /* Return the spanned pages */
Taku Izumid91749c2016-03-15 14:55:18 -07006254 return *zone_end_pfn - *zone_start_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07006255}
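/*
 * Illustrative worked example (editor's addition, not in the original
 * source): for a node spanning pfns 0x10000-0x50000, a ZONE_NORMAL
 * architecture range of 0x0-0x40000 and no ZONE_MOVABLE configured,
 * the boundaries above are clipped to 0x10000-0x40000 and the function
 * returns 0x30000 spanned pages.
 */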
6256
6257/*
6258 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006259 * then all holes in the requested range will be accounted for.
Mel Gormanc7132162006-09-27 01:49:43 -07006260 */
Oscar Salvadorbbe5d992018-12-28 00:37:24 -08006261unsigned long __init __absent_pages_in_range(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07006262 unsigned long range_start_pfn,
6263 unsigned long range_end_pfn)
6264{
Tejun Heo96e907d2011-07-12 10:46:29 +02006265 unsigned long nr_absent = range_end_pfn - range_start_pfn;
6266 unsigned long start_pfn, end_pfn;
6267 int i;
Mel Gormanc7132162006-09-27 01:49:43 -07006268
Tejun Heo96e907d2011-07-12 10:46:29 +02006269 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6270 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
6271 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
6272 nr_absent -= end_pfn - start_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07006273 }
Tejun Heo96e907d2011-07-12 10:46:29 +02006274 return nr_absent;
Mel Gormanc7132162006-09-27 01:49:43 -07006275}
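/*
 * Illustrative worked example (editor's addition, not in the original
 * source): for the range [0x1000, 0x5000) with memblock regions
 * covering [0x1000, 0x2000) and [0x3000, 0x4000), nr_absent starts at
 * 0x4000 and is reduced to 0x4000 - 0x1000 - 0x1000 = 0x2000 pages of
 * holes.
 */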
6276
6277/**
6278 * absent_pages_in_range - Return number of page frames in holes within a range
6279 * @start_pfn: The start PFN to start searching for holes
6280 * @end_pfn: The end PFN to stop searching for holes
6281 *
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006282 * It returns the number of page frames in memory holes within a range.
Mel Gormanc7132162006-09-27 01:49:43 -07006283 */
6284unsigned long __init absent_pages_in_range(unsigned long start_pfn,
6285 unsigned long end_pfn)
6286{
6287 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
6288}
6289
6290/* Return the number of page frames in holes in a zone on a node */
Oscar Salvadorbbe5d992018-12-28 00:37:24 -08006291static unsigned long __init zone_absent_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07006292 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006293 unsigned long node_start_pfn,
6294 unsigned long node_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07006295 unsigned long *ignored)
6296{
Tejun Heo96e907d2011-07-12 10:46:29 +02006297 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6298 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
Mel Gorman9c7cd682006-09-27 01:49:58 -07006299 unsigned long zone_start_pfn, zone_end_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07006300 unsigned long nr_absent;
Mel Gorman9c7cd682006-09-27 01:49:58 -07006301
Xishi Qiub5685e92015-09-08 15:04:16 -07006302 /* When hotadding a new node from cpu_up(), the node should be empty */
Xishi Qiuf9126ab2015-08-14 15:35:16 -07006303 if (!node_start_pfn && !node_end_pfn)
6304 return 0;
6305
Tejun Heo96e907d2011-07-12 10:46:29 +02006306 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6307 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
Mel Gorman9c7cd682006-09-27 01:49:58 -07006308
Mel Gorman2a1e2742007-07-17 04:03:12 -07006309 adjust_zone_range_for_zone_movable(nid, zone_type,
6310 node_start_pfn, node_end_pfn,
6311 &zone_start_pfn, &zone_end_pfn);
Taku Izumi342332e2016-03-15 14:55:22 -07006312 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
6313
6314 /*
6315 * ZONE_MOVABLE handling.
6316 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
6317 * and vice versa.
6318 */
Xishi Qiue506b992016-10-07 16:58:06 -07006319 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
6320 unsigned long start_pfn, end_pfn;
6321 struct memblock_region *r;
Taku Izumi342332e2016-03-15 14:55:22 -07006322
Xishi Qiue506b992016-10-07 16:58:06 -07006323 for_each_memblock(memory, r) {
6324 start_pfn = clamp(memblock_region_memory_base_pfn(r),
6325 zone_start_pfn, zone_end_pfn);
6326 end_pfn = clamp(memblock_region_memory_end_pfn(r),
6327 zone_start_pfn, zone_end_pfn);
Taku Izumi342332e2016-03-15 14:55:22 -07006328
Xishi Qiue506b992016-10-07 16:58:06 -07006329 if (zone_type == ZONE_MOVABLE &&
6330 memblock_is_mirror(r))
6331 nr_absent += end_pfn - start_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07006332
Xishi Qiue506b992016-10-07 16:58:06 -07006333 if (zone_type == ZONE_NORMAL &&
6334 !memblock_is_mirror(r))
6335 nr_absent += end_pfn - start_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07006336 }
6337 }
6338
6339 return nr_absent;
Mel Gormanc7132162006-09-27 01:49:43 -07006340}
Mel Gorman0e0b8642006-09-27 01:49:56 -07006341
Tejun Heo0ee332c2011-12-08 10:22:09 -08006342#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Oscar Salvadorbbe5d992018-12-28 00:37:24 -08006343static inline unsigned long __init zone_spanned_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07006344 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006345 unsigned long node_start_pfn,
6346 unsigned long node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07006347 unsigned long *zone_start_pfn,
6348 unsigned long *zone_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07006349 unsigned long *zones_size)
6350{
Taku Izumid91749c2016-03-15 14:55:18 -07006351 unsigned int zone;
6352
6353 *zone_start_pfn = node_start_pfn;
6354 for (zone = 0; zone < zone_type; zone++)
6355 *zone_start_pfn += zones_size[zone];
6356
6357 *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
6358
Mel Gormanc7132162006-09-27 01:49:43 -07006359 return zones_size[zone_type];
6360}
6361
Oscar Salvadorbbe5d992018-12-28 00:37:24 -08006362static inline unsigned long __init zone_absent_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07006363 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006364 unsigned long node_start_pfn,
6365 unsigned long node_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07006366 unsigned long *zholes_size)
6367{
6368 if (!zholes_size)
6369 return 0;
6370
6371 return zholes_size[zone_type];
6372}
Yinghai Lu20e69262013-03-01 14:51:27 -08006373
Tejun Heo0ee332c2011-12-08 10:22:09 -08006374#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07006375
Oscar Salvadorbbe5d992018-12-28 00:37:24 -08006376static void __init calculate_node_totalpages(struct pglist_data *pgdat,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006377 unsigned long node_start_pfn,
6378 unsigned long node_end_pfn,
6379 unsigned long *zones_size,
6380 unsigned long *zholes_size)
Mel Gormanc7132162006-09-27 01:49:43 -07006381{
Gu Zhengfebd5942015-06-24 16:57:02 -07006382 unsigned long realtotalpages = 0, totalpages = 0;
Mel Gormanc7132162006-09-27 01:49:43 -07006383 enum zone_type i;
6384
Gu Zhengfebd5942015-06-24 16:57:02 -07006385 for (i = 0; i < MAX_NR_ZONES; i++) {
6386 struct zone *zone = pgdat->node_zones + i;
Taku Izumid91749c2016-03-15 14:55:18 -07006387 unsigned long zone_start_pfn, zone_end_pfn;
Gu Zhengfebd5942015-06-24 16:57:02 -07006388 unsigned long size, real_size;
Mel Gormanc7132162006-09-27 01:49:43 -07006389
Gu Zhengfebd5942015-06-24 16:57:02 -07006390 size = zone_spanned_pages_in_node(pgdat->node_id, i,
6391 node_start_pfn,
6392 node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07006393 &zone_start_pfn,
6394 &zone_end_pfn,
Gu Zhengfebd5942015-06-24 16:57:02 -07006395 zones_size);
6396 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006397 node_start_pfn, node_end_pfn,
6398 zholes_size);
Taku Izumid91749c2016-03-15 14:55:18 -07006399 if (size)
6400 zone->zone_start_pfn = zone_start_pfn;
6401 else
6402 zone->zone_start_pfn = 0;
Gu Zhengfebd5942015-06-24 16:57:02 -07006403 zone->spanned_pages = size;
6404 zone->present_pages = real_size;
6405
6406 totalpages += size;
6407 realtotalpages += real_size;
6408 }
6409
6410 pgdat->node_spanned_pages = totalpages;
Mel Gormanc7132162006-09-27 01:49:43 -07006411 pgdat->node_present_pages = realtotalpages;
6412 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
6413 realtotalpages);
6414}
6415
Mel Gorman835c1342007-10-16 01:25:47 -07006416#ifndef CONFIG_SPARSEMEM
6417/*
 6418 * Calculate the size of the zone->blockflags rounded to an unsigned long.
Mel Gormand9c23402007-10-16 01:26:01 -07006419 * Start by making sure zonesize is a multiple of pageblock_order by rounding
 6420 * up. Then use one NR_PAGEBLOCK_BITS worth of bits per pageblock; finally,
Mel Gorman835c1342007-10-16 01:25:47 -07006421 * round what is now in bits up to the nearest long in bits, then return it
 6422 * in bytes.
6423 */
Linus Torvalds7c455122013-02-18 09:58:02 -08006424static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
Mel Gorman835c1342007-10-16 01:25:47 -07006425{
6426 unsigned long usemapsize;
6427
Linus Torvalds7c455122013-02-18 09:58:02 -08006428 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
Mel Gormand9c23402007-10-16 01:26:01 -07006429 usemapsize = roundup(zonesize, pageblock_nr_pages);
6430 usemapsize = usemapsize >> pageblock_order;
Mel Gorman835c1342007-10-16 01:25:47 -07006431 usemapsize *= NR_PAGEBLOCK_BITS;
6432 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
6433
6434 return usemapsize / 8;
6435}
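/*
 * Illustrative worked example (editor's addition, not in the original
 * source; assumes 4 KiB pages, pageblock_order = 9 and
 * NR_PAGEBLOCK_BITS = 4): a zone of 1048576 pages starting on a
 * pageblock boundary covers 2048 pageblocks, i.e. 8192 bits, which
 * round up to 8192 / 8 = 1024 bytes of pageblock flags.
 */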
6436
Pavel Tatashin7cc2a952018-08-21 21:53:36 -07006437static void __ref setup_usemap(struct pglist_data *pgdat,
Linus Torvalds7c455122013-02-18 09:58:02 -08006438 struct zone *zone,
6439 unsigned long zone_start_pfn,
6440 unsigned long zonesize)
Mel Gorman835c1342007-10-16 01:25:47 -07006441{
Linus Torvalds7c455122013-02-18 09:58:02 -08006442 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
Mel Gorman835c1342007-10-16 01:25:47 -07006443 zone->pageblock_flags = NULL;
Julia Lawall58a01a42009-01-06 14:39:28 -08006444 if (usemapsize)
Santosh Shilimkar67828322014-01-21 15:50:25 -08006445 zone->pageblock_flags =
Mike Rapoporteb31d552018-10-30 15:08:04 -07006446 memblock_alloc_node_nopanic(usemapsize,
Santosh Shilimkar67828322014-01-21 15:50:25 -08006447 pgdat->node_id);
Mel Gorman835c1342007-10-16 01:25:47 -07006448}
6449#else
Linus Torvalds7c455122013-02-18 09:58:02 -08006450static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
6451 unsigned long zone_start_pfn, unsigned long zonesize) {}
Mel Gorman835c1342007-10-16 01:25:47 -07006452#endif /* CONFIG_SPARSEMEM */
6453
Mel Gormand9c23402007-10-16 01:26:01 -07006454#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
Mel Gormanba72cb82007-11-28 16:21:13 -08006455
Mel Gormand9c23402007-10-16 01:26:01 -07006456/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
Oscar Salvador03e85f92018-08-21 21:53:43 -07006457void __init set_pageblock_order(void)
Mel Gormand9c23402007-10-16 01:26:01 -07006458{
Andrew Morton955c1cd2012-05-29 15:06:31 -07006459 unsigned int order;
6460
Mel Gormand9c23402007-10-16 01:26:01 -07006461 /* Check that pageblock_nr_pages has not already been setup */
6462 if (pageblock_order)
6463 return;
6464
Andrew Morton955c1cd2012-05-29 15:06:31 -07006465 if (HPAGE_SHIFT > PAGE_SHIFT)
6466 order = HUGETLB_PAGE_ORDER;
6467 else
6468 order = MAX_ORDER - 1;
6469
Mel Gormand9c23402007-10-16 01:26:01 -07006470 /*
6471 * Assume the largest contiguous order of interest is a huge page.
Andrew Morton955c1cd2012-05-29 15:06:31 -07006472 * This value may be variable depending on boot parameters on IA64 and
6473 * powerpc.
Mel Gormand9c23402007-10-16 01:26:01 -07006474 */
6475 pageblock_order = order;
6476}
6477#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6478
Mel Gormanba72cb82007-11-28 16:21:13 -08006479/*
6480 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
Andrew Morton955c1cd2012-05-29 15:06:31 -07006481 * is unused as pageblock_order is set at compile-time. See
6482 * include/linux/pageblock-flags.h for the values of pageblock_order based on
6483 * the kernel config
Mel Gormanba72cb82007-11-28 16:21:13 -08006484 */
Oscar Salvador03e85f92018-08-21 21:53:43 -07006485void __init set_pageblock_order(void)
Mel Gormanba72cb82007-11-28 16:21:13 -08006486{
Mel Gormanba72cb82007-11-28 16:21:13 -08006487}
Mel Gormand9c23402007-10-16 01:26:01 -07006488
6489#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6490
Oscar Salvador03e85f92018-08-21 21:53:43 -07006491static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
Pavel Tatashin7cc2a952018-08-21 21:53:36 -07006492 unsigned long present_pages)
Jiang Liu01cefae2012-12-12 13:52:19 -08006493{
6494 unsigned long pages = spanned_pages;
6495
6496 /*
6497 * Provide a more accurate estimation if there are holes within
6498 * the zone and SPARSEMEM is in use. If there are holes within the
6499 * zone, each populated memory region may cost us one or two extra
6500 * memmap pages due to alignment because memmap pages for each
Masahiro Yamada89d790a2017-02-27 14:29:01 -08006501 * populated region may not be naturally aligned on a page boundary.
Jiang Liu01cefae2012-12-12 13:52:19 -08006502 * So the (present_pages >> 4) heuristic is a tradeoff for that.
6503 */
6504 if (spanned_pages > present_pages + (present_pages >> 4) &&
6505 IS_ENABLED(CONFIG_SPARSEMEM))
6506 pages = present_pages;
6507
6508 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
6509}
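/*
 * Illustrative worked example (editor's addition, not in the original
 * source; assumes 4 KiB pages and a 64-byte struct page): a zone
 * spanning 1048576 pfns with 1000000 present pages is within the
 * present + (present >> 4) bound, so the spanned size is used and
 * PAGE_ALIGN(1048576 * 64) >> PAGE_SHIFT = 16384 memmap pages are
 * reported.
 */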
6510
Oscar Salvadorace1db32018-08-21 21:53:29 -07006511#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6512static void pgdat_init_split_queue(struct pglist_data *pgdat)
6513{
6514 spin_lock_init(&pgdat->split_queue_lock);
6515 INIT_LIST_HEAD(&pgdat->split_queue);
6516 pgdat->split_queue_len = 0;
6517}
6518#else
6519static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
6520#endif
6521
6522#ifdef CONFIG_COMPACTION
6523static void pgdat_init_kcompactd(struct pglist_data *pgdat)
6524{
6525 init_waitqueue_head(&pgdat->kcompactd_wait);
6526}
6527#else
6528static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
6529#endif
6530
Oscar Salvador03e85f92018-08-21 21:53:43 -07006531static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006532{
Dave Hansen208d54e2005-10-29 18:16:52 -07006533 pgdat_resize_init(pgdat);
Oscar Salvadorace1db32018-08-21 21:53:29 -07006534
Oscar Salvadorace1db32018-08-21 21:53:29 -07006535 pgdat_init_split_queue(pgdat);
6536 pgdat_init_kcompactd(pgdat);
6537
Linus Torvalds1da177e2005-04-16 15:20:36 -07006538 init_waitqueue_head(&pgdat->kswapd_wait);
Mel Gorman55150612012-07-31 16:44:35 -07006539 init_waitqueue_head(&pgdat->pfmemalloc_wait);
Oscar Salvadorace1db32018-08-21 21:53:29 -07006540
Joonsoo Kimeefa864b2014-12-12 16:55:46 -08006541 pgdat_page_ext_init(pgdat);
Mel Gormana52633d2016-07-28 15:45:28 -07006542 spin_lock_init(&pgdat->lru_lock);
Mel Gormana9dd0a82016-07-28 15:46:02 -07006543 lruvec_init(node_lruvec(pgdat));
Oscar Salvador03e85f92018-08-21 21:53:43 -07006544}
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01006545
Oscar Salvador03e85f92018-08-21 21:53:43 -07006546static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
6547 unsigned long remaining_pages)
6548{
Arun KS9705bea2018-12-28 00:34:24 -08006549 atomic_long_set(&zone->managed_pages, remaining_pages);
Oscar Salvador03e85f92018-08-21 21:53:43 -07006550 zone_set_nid(zone, nid);
6551 zone->name = zone_names[idx];
6552 zone->zone_pgdat = NODE_DATA(nid);
6553 spin_lock_init(&zone->lock);
6554 zone_seqlock_init(zone);
6555 zone_pcp_init(zone);
6556}
6557
6558/*
6559 * Set up the zone data structures
6560 * - init pgdat internals
6561 * - init all zones belonging to this node
6562 *
6563 * NOTE: this function is only called during memory hotplug
6564 */
6565#ifdef CONFIG_MEMORY_HOTPLUG
6566void __ref free_area_init_core_hotplug(int nid)
6567{
6568 enum zone_type z;
6569 pg_data_t *pgdat = NODE_DATA(nid);
6570
6571 pgdat_init_internals(pgdat);
6572 for (z = 0; z < MAX_NR_ZONES; z++)
6573 zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
6574}
6575#endif
6576
6577/*
6578 * Set up the zone data structures:
6579 * - mark all pages reserved
6580 * - mark all memory queues empty
6581 * - clear the memory bitmaps
6582 *
6583 * NOTE: pgdat should get zeroed by caller.
6584 * NOTE: this function is only called during early init.
6585 */
6586static void __init free_area_init_core(struct pglist_data *pgdat)
6587{
6588 enum zone_type j;
6589 int nid = pgdat->node_id;
6590
6591 pgdat_init_internals(pgdat);
Johannes Weiner385386c2017-07-06 15:40:43 -07006592 pgdat->per_cpu_nodestats = &boot_nodestats;
6593
Linus Torvalds1da177e2005-04-16 15:20:36 -07006594 for (j = 0; j < MAX_NR_ZONES; j++) {
6595 struct zone *zone = pgdat->node_zones + j;
Wei Yange6943852018-06-07 17:06:04 -07006596 unsigned long size, freesize, memmap_pages;
Taku Izumid91749c2016-03-15 14:55:18 -07006597 unsigned long zone_start_pfn = zone->zone_start_pfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006598
Gu Zhengfebd5942015-06-24 16:57:02 -07006599 size = zone->spanned_pages;
Wei Yange6943852018-06-07 17:06:04 -07006600 freesize = zone->present_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006601
Mel Gorman0e0b8642006-09-27 01:49:56 -07006602 /*
Jiang Liu9feedc92012-12-12 13:52:12 -08006603 * Adjust freesize so that it accounts for how much memory
Mel Gorman0e0b8642006-09-27 01:49:56 -07006604 * is used by this zone for memmap. This affects the watermark
6605 * and per-cpu initialisations
6606 */
Wei Yange6943852018-06-07 17:06:04 -07006607 memmap_pages = calc_memmap_size(size, freesize);
Zhong Hongboba914f42014-12-12 16:56:21 -08006608 if (!is_highmem_idx(j)) {
6609 if (freesize >= memmap_pages) {
6610 freesize -= memmap_pages;
6611 if (memmap_pages)
6612 printk(KERN_DEBUG
6613 " %s zone: %lu pages used for memmap\n",
6614 zone_names[j], memmap_pages);
6615 } else
Joe Perches11705322016-03-17 14:19:50 -07006616 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
Zhong Hongboba914f42014-12-12 16:56:21 -08006617 zone_names[j], memmap_pages, freesize);
6618 }
Mel Gorman0e0b8642006-09-27 01:49:56 -07006619
Christoph Lameter62672762007-02-10 01:43:07 -08006620 /* Account for reserved pages */
Jiang Liu9feedc92012-12-12 13:52:12 -08006621 if (j == 0 && freesize > dma_reserve) {
6622 freesize -= dma_reserve;
Yinghai Lud903ef92008-10-18 20:27:06 -07006623 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
Christoph Lameter62672762007-02-10 01:43:07 -08006624 zone_names[0], dma_reserve);
Mel Gorman0e0b8642006-09-27 01:49:56 -07006625 }
6626
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07006627 if (!is_highmem_idx(j))
Jiang Liu9feedc92012-12-12 13:52:12 -08006628 nr_kernel_pages += freesize;
Jiang Liu01cefae2012-12-12 13:52:19 -08006629 /* Charge for highmem memmap if there are enough kernel pages */
6630 else if (nr_kernel_pages > memmap_pages * 2)
6631 nr_kernel_pages -= memmap_pages;
Jiang Liu9feedc92012-12-12 13:52:12 -08006632 nr_all_pages += freesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006633
Jiang Liu9feedc92012-12-12 13:52:12 -08006634 /*
 6635 * Set an approximate value for lowmem here; it will be adjusted
 6636 * when the bootmem allocator frees pages into the buddy system.
 6637 * All highmem pages will be managed by the buddy system.
6638 */
Oscar Salvador03e85f92018-08-21 21:53:43 -07006639 zone_init_internals(zone, j, nid, freesize);
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07006640
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09006641 if (!size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006642 continue;
6643
Andrew Morton955c1cd2012-05-29 15:06:31 -07006644 set_pageblock_order();
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09006645 setup_usemap(pgdat, zone, zone_start_pfn, size);
6646 init_currently_empty_zone(zone, zone_start_pfn, size);
Heiko Carstens76cdd582008-05-14 16:05:52 -07006647 memmap_init(size, nid, j, zone_start_pfn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006648 }
6649}
6650
Oscar Salvador0cd842f2017-11-15 17:39:18 -08006651#ifdef CONFIG_FLAT_NODE_MEM_MAP
Fabian Frederickbd721ea2016-08-02 14:03:33 -07006652static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006653{
Tony Luckb0aeba72015-11-10 10:09:47 -08006654 unsigned long __maybe_unused start = 0;
Laura Abbotta1c34a32015-11-05 18:48:46 -08006655 unsigned long __maybe_unused offset = 0;
6656
Linus Torvalds1da177e2005-04-16 15:20:36 -07006657 /* Skip empty nodes */
6658 if (!pgdat->node_spanned_pages)
6659 return;
6660
Tony Luckb0aeba72015-11-10 10:09:47 -08006661 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
6662 offset = pgdat->node_start_pfn - start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006663 /* ia64 gets its own node_mem_map, before this, without bootmem */
6664 if (!pgdat->node_mem_map) {
Tony Luckb0aeba72015-11-10 10:09:47 -08006665 unsigned long size, end;
Andy Whitcroftd41dee32005-06-23 00:07:54 -07006666 struct page *map;
6667
Bob Piccoe984bb42006-05-20 15:00:31 -07006668 /*
6669 * The zone's endpoints aren't required to be MAX_ORDER
6670 * aligned but the node_mem_map endpoints must be in order
6671 * for the buddy allocator to function correctly.
6672 */
Cody P Schafer108bcc92013-02-22 16:35:23 -08006673 end = pgdat_end_pfn(pgdat);
Bob Piccoe984bb42006-05-20 15:00:31 -07006674 end = ALIGN(end, MAX_ORDER_NR_PAGES);
6675 size = (end - start) * sizeof(struct page);
Mike Rapoporteb31d552018-10-30 15:08:04 -07006676 map = memblock_alloc_node_nopanic(size, pgdat->node_id);
Laura Abbotta1c34a32015-11-05 18:48:46 -08006677 pgdat->node_mem_map = map + offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006678 }
Oscar Salvador0cd842f2017-11-15 17:39:18 -08006679 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
6680 __func__, pgdat->node_id, (unsigned long)pgdat,
6681 (unsigned long)pgdat->node_mem_map);
Roman Zippel12d810c2007-05-31 00:40:54 -07006682#ifndef CONFIG_NEED_MULTIPLE_NODES
Linus Torvalds1da177e2005-04-16 15:20:36 -07006683 /*
6684 * With no DISCONTIG, the global mem_map is just set as node 0's
6685 */
Mel Gormanc7132162006-09-27 01:49:43 -07006686 if (pgdat == NODE_DATA(0)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006687 mem_map = NODE_DATA(0)->node_mem_map;
Laura Abbotta1c34a32015-11-05 18:48:46 -08006688#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
Mel Gormanc7132162006-09-27 01:49:43 -07006689 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
Laura Abbotta1c34a32015-11-05 18:48:46 -08006690 mem_map -= offset;
Tejun Heo0ee332c2011-12-08 10:22:09 -08006691#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07006692 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006693#endif
6694}
Oscar Salvador0cd842f2017-11-15 17:39:18 -08006695#else
6696static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
6697#endif /* CONFIG_FLAT_NODE_MEM_MAP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006698
Oscar Salvador0188dc92018-08-21 21:53:39 -07006699#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
6700static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
6701{
Oscar Salvador0188dc92018-08-21 21:53:39 -07006702 pgdat->first_deferred_pfn = ULONG_MAX;
6703}
6704#else
6705static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
6706#endif
6707
Oscar Salvador03e85f92018-08-21 21:53:43 -07006708void __init free_area_init_node(int nid, unsigned long *zones_size,
Pavel Tatashin7cc2a952018-08-21 21:53:36 -07006709 unsigned long node_start_pfn,
6710 unsigned long *zholes_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006711{
Johannes Weiner9109fb72008-07-23 21:27:20 -07006712 pg_data_t *pgdat = NODE_DATA(nid);
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006713 unsigned long start_pfn = 0;
6714 unsigned long end_pfn = 0;
Johannes Weiner9109fb72008-07-23 21:27:20 -07006715
Minchan Kim88fdf752012-07-31 16:46:14 -07006716 /* pg_data_t should be reset to zero when it's allocated */
Mel Gorman38087d92016-07-28 15:45:49 -07006717 WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
Minchan Kim88fdf752012-07-31 16:46:14 -07006718
Linus Torvalds1da177e2005-04-16 15:20:36 -07006719 pgdat->node_id = nid;
6720 pgdat->node_start_pfn = node_start_pfn;
Mel Gorman75ef7182016-07-28 15:45:24 -07006721 pgdat->per_cpu_nodestats = NULL;
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006722#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6723 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
Juergen Gross8d29e182015-02-11 15:26:01 -08006724 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
Zhen Lei4ada0c52015-09-08 15:04:19 -07006725 (u64)start_pfn << PAGE_SHIFT,
6726 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
Taku Izumid91749c2016-03-15 14:55:18 -07006727#else
6728 start_pfn = node_start_pfn;
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006729#endif
6730 calculate_node_totalpages(pgdat, start_pfn, end_pfn,
6731 zones_size, zholes_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006732
6733 alloc_node_mem_map(pgdat);
Oscar Salvador0188dc92018-08-21 21:53:39 -07006734 pgdat_set_deferred_range(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006735
Wei Yang7f3eb552015-09-08 14:59:50 -07006736 free_area_init_core(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006737}
6738
Mike Rapoportaca52c32018-10-30 15:07:44 -07006739#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
Pavel Tatashinec393a0f2018-10-26 15:10:21 -07006740/*
6741 * Zero all valid struct pages in range [spfn, epfn), return number of struct
6742 * pages zeroed
6743 */
6744static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
6745{
6746 unsigned long pfn;
6747 u64 pgcnt = 0;
6748
6749 for (pfn = spfn; pfn < epfn; pfn++) {
6750 if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
6751 pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
6752 + pageblock_nr_pages - 1;
6753 continue;
6754 }
6755 mm_zero_struct_page(pfn_to_page(pfn));
6756 pgcnt++;
6757 }
6758
6759 return pgcnt;
6760}
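/*
 * Illustrative worked example (editor's addition, not in the original
 * source; assumes pageblock_nr_pages = 512): calling this over
 * [0, 2048) when only the pageblock starting at pfn 512 has a valid
 * start pfn zeroes struct pages 512..1023 and returns 512; the other
 * pageblocks are skipped a whole block at a time.
 */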
6761
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006762/*
6763 * Only struct pages that are backed by physical memory are zeroed and
 6764 * initialized by going through __init_single_page(). But there are some
 6765 * struct pages which are reserved in the memblock allocator and whose fields
 6766 * may be accessed (for example, page_to_pfn() on some configurations accesses
 6767 * flags). We must explicitly zero those struct pages.
Naoya Horiguchi907ec5f2018-10-26 15:10:15 -07006768 *
6769 * This function also addresses a similar issue where struct pages are left
6770 * uninitialized because the physical address range is not covered by
6771 * memblock.memory or memblock.reserved. That could happen when memblock
6772 * layout is manually configured via memmap=.
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006773 */
Oscar Salvador03e85f92018-08-21 21:53:43 -07006774void __init zero_resv_unavail(void)
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006775{
6776 phys_addr_t start, end;
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006777 u64 i, pgcnt;
Naoya Horiguchi907ec5f2018-10-26 15:10:15 -07006778 phys_addr_t next = 0;
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006779
6780 /*
Naoya Horiguchi907ec5f2018-10-26 15:10:15 -07006781 * Loop through unavailable ranges not covered by memblock.memory.
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006782 */
6783 pgcnt = 0;
Naoya Horiguchi907ec5f2018-10-26 15:10:15 -07006784 for_each_mem_range(i, &memblock.memory, NULL,
6785 NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) {
Pavel Tatashinec393a0f2018-10-26 15:10:21 -07006786 if (next < start)
6787 pgcnt += zero_pfn_range(PFN_DOWN(next), PFN_UP(start));
Naoya Horiguchi907ec5f2018-10-26 15:10:15 -07006788 next = end;
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006789 }
Pavel Tatashinec393a0f2018-10-26 15:10:21 -07006790 pgcnt += zero_pfn_range(PFN_DOWN(next), max_pfn);
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006791
6792 /*
6793 * Struct pages that do not have backing memory. This could be because
 6794 * firmware is using some of this memory, or for some other reason.
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006795 */
6796 if (pgcnt)
Naoya Horiguchi907ec5f2018-10-26 15:10:15 -07006797 pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006798}
Mike Rapoportaca52c32018-10-30 15:07:44 -07006799#endif /* !CONFIG_FLAT_NODE_MEM_MAP */
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006800
Tejun Heo0ee332c2011-12-08 10:22:09 -08006801#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Miklos Szeredi418508c2007-05-23 13:57:55 -07006802
6803#if MAX_NUMNODES > 1
6804/*
6805 * Figure out the number of possible node ids.
6806 */
Cody P Schaferf9872ca2013-04-29 15:08:01 -07006807void __init setup_nr_node_ids(void)
Miklos Szeredi418508c2007-05-23 13:57:55 -07006808{
Wei Yang904a9552015-09-08 14:59:48 -07006809 unsigned int highest;
Miklos Szeredi418508c2007-05-23 13:57:55 -07006810
Wei Yang904a9552015-09-08 14:59:48 -07006811 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
Miklos Szeredi418508c2007-05-23 13:57:55 -07006812 nr_node_ids = highest + 1;
6813}
Miklos Szeredi418508c2007-05-23 13:57:55 -07006814#endif
6815
Mel Gormanc7132162006-09-27 01:49:43 -07006816/**
Tejun Heo1e019792011-07-12 09:45:34 +02006817 * node_map_pfn_alignment - determine the maximum internode alignment
6818 *
6819 * This function should be called after node map is populated and sorted.
6820 * It calculates the maximum power of two alignment which can distinguish
6821 * all the nodes.
6822 *
6823 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
6824 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
6825 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is
6826 * shifted, 1GiB is enough and this function will indicate so.
6827 *
6828 * This is used to test whether pfn -> nid mapping of the chosen memory
6829 * model has fine enough granularity to avoid incorrect mapping for the
6830 * populated node map.
6831 *
6832 * Returns the determined alignment in pfn's. 0 if there is no alignment
6833 * requirement (single node).
6834 */
6835unsigned long __init node_map_pfn_alignment(void)
6836{
6837 unsigned long accl_mask = 0, last_end = 0;
Tejun Heoc13291a2011-07-12 10:46:30 +02006838 unsigned long start, end, mask;
Anshuman Khandual98fa15f2019-03-05 15:42:58 -08006839 int last_nid = NUMA_NO_NODE;
Tejun Heoc13291a2011-07-12 10:46:30 +02006840 int i, nid;
Tejun Heo1e019792011-07-12 09:45:34 +02006841
Tejun Heoc13291a2011-07-12 10:46:30 +02006842 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
Tejun Heo1e019792011-07-12 09:45:34 +02006843 if (!start || last_nid < 0 || last_nid == nid) {
6844 last_nid = nid;
6845 last_end = end;
6846 continue;
6847 }
6848
6849 /*
6850 * Start with a mask granular enough to pin-point to the
6851 * start pfn and tick off bits one-by-one until it becomes
6852 * too coarse to separate the current node from the last.
6853 */
6854 mask = ~((1 << __ffs(start)) - 1);
6855 while (mask && last_end <= (start & (mask << 1)))
6856 mask <<= 1;
6857
6858 /* accumulate all internode masks */
6859 accl_mask |= mask;
6860 }
6861
6862 /* convert mask to number of pages */
6863 return ~accl_mask + 1;
6864}
6865
Mel Gormana6af2bc2007-02-10 01:42:57 -08006866/* Find the lowest pfn for a node */
Adrian Bunkb69a7282008-07-23 21:28:12 -07006867static unsigned long __init find_min_pfn_for_node(int nid)
Mel Gormanc7132162006-09-27 01:49:43 -07006868{
Mel Gormana6af2bc2007-02-10 01:42:57 -08006869 unsigned long min_pfn = ULONG_MAX;
Tejun Heoc13291a2011-07-12 10:46:30 +02006870 unsigned long start_pfn;
6871 int i;
Mel Gorman1abbfb42006-11-23 12:01:41 +00006872
Tejun Heoc13291a2011-07-12 10:46:30 +02006873 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
6874 min_pfn = min(min_pfn, start_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07006875
Mel Gormana6af2bc2007-02-10 01:42:57 -08006876 if (min_pfn == ULONG_MAX) {
Joe Perches11705322016-03-17 14:19:50 -07006877 pr_warn("Could not find start_pfn for node %d\n", nid);
Mel Gormana6af2bc2007-02-10 01:42:57 -08006878 return 0;
6879 }
6880
6881 return min_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07006882}
6883
6884/**
6885 * find_min_pfn_with_active_regions - Find the minimum PFN registered
6886 *
6887 * It returns the minimum PFN based on information provided via
Zhang Zhen7d018172014-06-04 16:10:53 -07006888 * memblock_set_node().
Mel Gormanc7132162006-09-27 01:49:43 -07006889 */
6890unsigned long __init find_min_pfn_with_active_regions(void)
6891{
6892 return find_min_pfn_for_node(MAX_NUMNODES);
6893}
6894
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006895/*
6896 * early_calculate_totalpages()
6897 * Sum pages in active regions for movable zone.
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006898 * Populate N_MEMORY for calculating usable_nodes.
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006899 */
Adrian Bunk484f51f2007-10-16 01:26:03 -07006900static unsigned long __init early_calculate_totalpages(void)
Mel Gorman7e63efef2007-07-17 04:03:15 -07006901{
Mel Gorman7e63efef2007-07-17 04:03:15 -07006902 unsigned long totalpages = 0;
Tejun Heoc13291a2011-07-12 10:46:30 +02006903 unsigned long start_pfn, end_pfn;
6904 int i, nid;
Mel Gorman7e63efef2007-07-17 04:03:15 -07006905
Tejun Heoc13291a2011-07-12 10:46:30 +02006906 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6907 unsigned long pages = end_pfn - start_pfn;
6908
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006909 totalpages += pages;
6910 if (pages)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006911 node_set_state(nid, N_MEMORY);
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006912 }
Pintu Kumarb8af2942013-09-11 14:20:34 -07006913 return totalpages;
Mel Gorman7e63efef2007-07-17 04:03:15 -07006914}
6915
Mel Gorman2a1e2742007-07-17 04:03:12 -07006916/*
6917 * Find the PFN the Movable zone begins in each node. Kernel memory
6918 * is spread evenly between nodes as long as the nodes have enough
6919 * memory. When they don't, some nodes will have more kernelcore than
6920 * others
6921 */
Kautuk Consulb224ef82012-03-21 16:34:15 -07006922static void __init find_zone_movable_pfns_for_nodes(void)
Mel Gorman2a1e2742007-07-17 04:03:12 -07006923{
6924 int i, nid;
6925 unsigned long usable_startpfn;
6926 unsigned long kernelcore_node, kernelcore_remaining;
Yinghai Lu66918dc2009-06-30 11:41:37 -07006927 /* save the state before borrow the nodemask */
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006928 nodemask_t saved_node_state = node_states[N_MEMORY];
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006929 unsigned long totalpages = early_calculate_totalpages();
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006930 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
Emil Medve136199f2014-04-07 15:37:52 -07006931 struct memblock_region *r;
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006932
6933 /* Need to find movable_zone earlier when movable_node is specified. */
6934 find_usable_zone_for_movable();
Mel Gorman2a1e2742007-07-17 04:03:12 -07006935
Mel Gorman7e63efef2007-07-17 04:03:15 -07006936 /*
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006937 * If movable_node is specified, ignore kernelcore and movablecore
6938 * options.
6939 */
6940 if (movable_node_is_enabled()) {
Emil Medve136199f2014-04-07 15:37:52 -07006941 for_each_memblock(memory, r) {
6942 if (!memblock_is_hotpluggable(r))
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006943 continue;
6944
Emil Medve136199f2014-04-07 15:37:52 -07006945 nid = r->nid;
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006946
Emil Medve136199f2014-04-07 15:37:52 -07006947 usable_startpfn = PFN_DOWN(r->base);
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006948 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6949 min(usable_startpfn, zone_movable_pfn[nid]) :
6950 usable_startpfn;
6951 }
6952
6953 goto out2;
6954 }
6955
6956 /*
Taku Izumi342332e2016-03-15 14:55:22 -07006957 * If kernelcore=mirror is specified, ignore movablecore option
6958 */
6959 if (mirrored_kernelcore) {
6960 bool mem_below_4gb_not_mirrored = false;
6961
6962 for_each_memblock(memory, r) {
6963 if (memblock_is_mirror(r))
6964 continue;
6965
6966 nid = r->nid;
6967
6968 usable_startpfn = memblock_region_memory_base_pfn(r);
6969
6970 if (usable_startpfn < 0x100000) {
6971 mem_below_4gb_not_mirrored = true;
6972 continue;
6973 }
6974
6975 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6976 min(usable_startpfn, zone_movable_pfn[nid]) :
6977 usable_startpfn;
6978 }
6979
6980 if (mem_below_4gb_not_mirrored)
6981			pr_warn("This configuration results in unmirrored kernel memory.\n");
6982
6983 goto out2;
6984 }
6985
6986 /*
David Rientjesa5c6d652018-04-05 16:23:09 -07006987 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
6988 * amount of necessary memory.
6989 */
6990 if (required_kernelcore_percent)
6991 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
6992 10000UL;
6993 if (required_movablecore_percent)
6994 required_movablecore = (totalpages * 100 * required_movablecore_percent) /
6995 10000UL;
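	/*
	 * Illustrative example (numbers not from the original source): with
	 * 4 KiB pages and totalpages = 1048576 (4 GiB of memory),
	 * kernelcore=25% yields required_kernelcore =
	 * (1048576 * 100 * 25) / 10000 = 262144 pages, i.e. 1 GiB reserved
	 * for unmovable kernel allocations.
	 */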
6996
6997 /*
6998	 * If movablecore= was specified, calculate the corresponding
Mel Gorman7e63efef2007-07-17 04:03:15 -07006999	 * size of kernelcore so that memory usable for
7000 * any allocation type is evenly spread. If both kernelcore
7001 * and movablecore are specified, then the value of kernelcore
7002 * will be used for required_kernelcore if it's greater than
7003 * what movablecore would have allowed.
7004 */
7005 if (required_movablecore) {
Mel Gorman7e63efef2007-07-17 04:03:15 -07007006 unsigned long corepages;
7007
7008 /*
7009 * Round-up so that ZONE_MOVABLE is at least as large as what
7010 * was requested by the user
7011 */
7012 required_movablecore =
7013 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
Xishi Qiu9fd745d2015-11-05 18:48:11 -08007014 required_movablecore = min(totalpages, required_movablecore);
Mel Gorman7e63efef2007-07-17 04:03:15 -07007015 corepages = totalpages - required_movablecore;
7016
7017 required_kernelcore = max(required_kernelcore, corepages);
7018 }
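	/*
	 * Illustrative example (hypothetical numbers): with totalpages =
	 * 1048576 pages and movablecore=1G (262144 pages, already a multiple
	 * of MAX_ORDER_NR_PAGES), corepages = 1048576 - 262144 = 786432, so
	 * required_kernelcore becomes at least 786432 pages (3 GiB).
	 */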
7019
Xishi Qiubde304b2015-11-05 18:48:56 -08007020 /*
7021 * If kernelcore was not specified or kernelcore size is larger
7022 * than totalpages, there is no ZONE_MOVABLE.
7023 */
7024 if (!required_kernelcore || required_kernelcore >= totalpages)
Yinghai Lu66918dc2009-06-30 11:41:37 -07007025 goto out;
Mel Gorman2a1e2742007-07-17 04:03:12 -07007026
7027 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
Mel Gorman2a1e2742007-07-17 04:03:12 -07007028 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
7029
7030restart:
7031 /* Spread kernelcore memory as evenly as possible throughout nodes */
7032 kernelcore_node = required_kernelcore / usable_nodes;
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007033 for_each_node_state(nid, N_MEMORY) {
Tejun Heoc13291a2011-07-12 10:46:30 +02007034 unsigned long start_pfn, end_pfn;
7035
Mel Gorman2a1e2742007-07-17 04:03:12 -07007036 /*
7037 * Recalculate kernelcore_node if the division per node
7038 * now exceeds what is necessary to satisfy the requested
7039 * amount of memory for the kernel
7040 */
7041 if (required_kernelcore < kernelcore_node)
7042 kernelcore_node = required_kernelcore / usable_nodes;
7043
7044 /*
7045 * As the map is walked, we track how much memory is usable
7046 * by the kernel using kernelcore_remaining. When it is
7047 * 0, the rest of the node is usable by ZONE_MOVABLE
7048 */
7049 kernelcore_remaining = kernelcore_node;
7050
7051 /* Go through each range of PFNs within this node */
Tejun Heoc13291a2011-07-12 10:46:30 +02007052 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
Mel Gorman2a1e2742007-07-17 04:03:12 -07007053 unsigned long size_pages;
7054
Tejun Heoc13291a2011-07-12 10:46:30 +02007055 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
Mel Gorman2a1e2742007-07-17 04:03:12 -07007056 if (start_pfn >= end_pfn)
7057 continue;
7058
7059 /* Account for what is only usable for kernelcore */
7060 if (start_pfn < usable_startpfn) {
7061 unsigned long kernel_pages;
7062 kernel_pages = min(end_pfn, usable_startpfn)
7063 - start_pfn;
7064
7065 kernelcore_remaining -= min(kernel_pages,
7066 kernelcore_remaining);
7067 required_kernelcore -= min(kernel_pages,
7068 required_kernelcore);
7069
7070 /* Continue if range is now fully accounted */
7071 if (end_pfn <= usable_startpfn) {
7072
7073 /*
7074 * Push zone_movable_pfn to the end so
7075 * that if we have to rebalance
7076 * kernelcore across nodes, we will
7077 * not double account here
7078 */
7079 zone_movable_pfn[nid] = end_pfn;
7080 continue;
7081 }
7082 start_pfn = usable_startpfn;
7083 }
7084
7085 /*
7086 * The usable PFN range for ZONE_MOVABLE is from
7087 * start_pfn->end_pfn. Calculate size_pages as the
7088 * number of pages used as kernelcore
7089 */
7090 size_pages = end_pfn - start_pfn;
7091 if (size_pages > kernelcore_remaining)
7092 size_pages = kernelcore_remaining;
7093 zone_movable_pfn[nid] = start_pfn + size_pages;
7094
7095 /*
7096 * Some kernelcore has been met, update counts and
7097 * break if the kernelcore for this node has been
Pintu Kumarb8af2942013-09-11 14:20:34 -07007098 * satisfied
Mel Gorman2a1e2742007-07-17 04:03:12 -07007099 */
7100 required_kernelcore -= min(required_kernelcore,
7101 size_pages);
7102 kernelcore_remaining -= size_pages;
7103 if (!kernelcore_remaining)
7104 break;
7105 }
7106 }
7107
7108 /*
7109 * If there is still required_kernelcore, we do another pass with one
7110 * less node in the count. This will push zone_movable_pfn[nid] further
7111 * along on the nodes that still have memory until kernelcore is
Pintu Kumarb8af2942013-09-11 14:20:34 -07007112 * satisfied
Mel Gorman2a1e2742007-07-17 04:03:12 -07007113 */
7114 usable_nodes--;
7115 if (usable_nodes && required_kernelcore > usable_nodes)
7116 goto restart;
7117
Tang Chenb2f3eeb2014-01-21 15:49:38 -08007118out2:
Mel Gorman2a1e2742007-07-17 04:03:12 -07007119 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
7120 for (nid = 0; nid < MAX_NUMNODES; nid++)
7121 zone_movable_pfn[nid] =
7122 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
Yinghai Lu66918dc2009-06-30 11:41:37 -07007123
Yinghai Lu20e69262013-03-01 14:51:27 -08007124out:
Yinghai Lu66918dc2009-06-30 11:41:37 -07007125 /* restore the node_state */
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007126 node_states[N_MEMORY] = saved_node_state;
Mel Gorman2a1e2742007-07-17 04:03:12 -07007127}
7128
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007129/* Any regular or high memory on that node? */
7130static void check_for_memory(pg_data_t *pgdat, int nid)
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007131{
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007132 enum zone_type zone_type;
7133
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007134 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007135 struct zone *zone = &pgdat->node_zones[zone_type];
Xishi Qiub38a8722013-11-12 15:07:20 -08007136 if (populated_zone(zone)) {
Oscar Salvador7b0e0c02018-10-26 15:03:58 -07007137 if (IS_ENABLED(CONFIG_HIGHMEM))
7138 node_set_state(nid, N_HIGH_MEMORY);
7139 if (zone_type <= ZONE_NORMAL)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007140 node_set_state(nid, N_NORMAL_MEMORY);
Bob Liud0048b02012-01-12 17:19:07 -08007141 break;
7142 }
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007143 }
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007144}
7145
Mel Gormanc7132162006-09-27 01:49:43 -07007146/**
7147 * free_area_init_nodes - Initialise all pg_data_t and zone data
Randy Dunlap88ca3b92006-10-04 02:15:25 -07007148 * @max_zone_pfn: an array of max PFNs for each zone
Mel Gormanc7132162006-09-27 01:49:43 -07007149 *
7150 * This will call free_area_init_node() for each active node in the system.
Zhang Zhen7d018172014-06-04 16:10:53 -07007151 * Using the page ranges provided by memblock_set_node(), the size of each
Mel Gormanc7132162006-09-27 01:49:43 -07007152 * zone in each node, along with its holes, is calculated. If the maximum
7153 * PFNs of two adjacent zones match, the higher zone is assumed to be empty.
7154 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
7155 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7156 * starts where the previous one ended. For example, ZONE_DMA32 starts
7157 * at arch_max_dma_pfn.
7158 */
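/*
 * Illustrative example (values are arch-dependent): on a typical x86_64
 * machine the caller might pass max_zone_pfn[ZONE_DMA] = 0x1000 (16 MiB),
 * max_zone_pfn[ZONE_DMA32] = 0x100000 (4 GiB) and max_zone_pfn[ZONE_NORMAL]
 * = max_pfn, so ZONE_DMA32 spans [0x1000, 0x100000) and ZONE_NORMAL starts
 * at 4 GiB.
 */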
7159void __init free_area_init_nodes(unsigned long *max_zone_pfn)
7160{
Tejun Heoc13291a2011-07-12 10:46:30 +02007161 unsigned long start_pfn, end_pfn;
7162 int i, nid;
Mel Gormana6af2bc2007-02-10 01:42:57 -08007163
Mel Gormanc7132162006-09-27 01:49:43 -07007164 /* Record where the zone boundaries are */
7165 memset(arch_zone_lowest_possible_pfn, 0,
7166 sizeof(arch_zone_lowest_possible_pfn));
7167 memset(arch_zone_highest_possible_pfn, 0,
7168 sizeof(arch_zone_highest_possible_pfn));
Oliver O'Halloran90cae1f2016-07-26 15:22:17 -07007169
7170 start_pfn = find_min_pfn_with_active_regions();
7171
7172 for (i = 0; i < MAX_NR_ZONES; i++) {
Mel Gorman2a1e2742007-07-17 04:03:12 -07007173 if (i == ZONE_MOVABLE)
7174 continue;
Oliver O'Halloran90cae1f2016-07-26 15:22:17 -07007175
7176 end_pfn = max(max_zone_pfn[i], start_pfn);
7177 arch_zone_lowest_possible_pfn[i] = start_pfn;
7178 arch_zone_highest_possible_pfn[i] = end_pfn;
7179
7180 start_pfn = end_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07007181 }
Mel Gorman2a1e2742007-07-17 04:03:12 -07007182
7183 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
7184 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
Kautuk Consulb224ef82012-03-21 16:34:15 -07007185 find_zone_movable_pfns_for_nodes();
Mel Gormanc7132162006-09-27 01:49:43 -07007186
Mel Gormanc7132162006-09-27 01:49:43 -07007187 /* Print out the zone ranges */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08007188 pr_info("Zone ranges:\n");
Mel Gorman2a1e2742007-07-17 04:03:12 -07007189 for (i = 0; i < MAX_NR_ZONES; i++) {
7190 if (i == ZONE_MOVABLE)
7191 continue;
Anton Blanchardf88dfff2014-12-10 15:42:53 -08007192 pr_info(" %-8s ", zone_names[i]);
David Rientjes72f0ba02010-03-05 13:42:14 -08007193 if (arch_zone_lowest_possible_pfn[i] ==
7194 arch_zone_highest_possible_pfn[i])
Anton Blanchardf88dfff2014-12-10 15:42:53 -08007195 pr_cont("empty\n");
David Rientjes72f0ba02010-03-05 13:42:14 -08007196 else
Juergen Gross8d29e182015-02-11 15:26:01 -08007197 pr_cont("[mem %#018Lx-%#018Lx]\n",
7198 (u64)arch_zone_lowest_possible_pfn[i]
7199 << PAGE_SHIFT,
7200 ((u64)arch_zone_highest_possible_pfn[i]
Bjorn Helgaasa62e2f42012-05-29 15:06:30 -07007201 << PAGE_SHIFT) - 1);
Mel Gorman2a1e2742007-07-17 04:03:12 -07007202 }
7203
7204 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08007205 pr_info("Movable zone start for each node\n");
Mel Gorman2a1e2742007-07-17 04:03:12 -07007206 for (i = 0; i < MAX_NUMNODES; i++) {
7207 if (zone_movable_pfn[i])
Juergen Gross8d29e182015-02-11 15:26:01 -08007208 pr_info(" Node %d: %#018Lx\n", i,
7209 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
Mel Gorman2a1e2742007-07-17 04:03:12 -07007210 }
Mel Gormanc7132162006-09-27 01:49:43 -07007211
Wanpeng Lif2d52fe2012-10-08 16:32:24 -07007212 /* Print out the early node map */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08007213 pr_info("Early memory node ranges\n");
Tejun Heoc13291a2011-07-12 10:46:30 +02007214 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
Juergen Gross8d29e182015-02-11 15:26:01 -08007215 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
7216 (u64)start_pfn << PAGE_SHIFT,
7217 ((u64)end_pfn << PAGE_SHIFT) - 1);
Mel Gormanc7132162006-09-27 01:49:43 -07007218
7219 /* Initialise every node */
Mel Gorman708614e2008-07-23 21:26:51 -07007220 mminit_verify_pageflags_layout();
Christoph Lameter8ef82862007-02-20 13:57:52 -08007221 setup_nr_node_ids();
Pavel Tatashine181ae0c2018-07-14 09:15:07 -04007222 zero_resv_unavail();
Mel Gormanc7132162006-09-27 01:49:43 -07007223 for_each_online_node(nid) {
7224 pg_data_t *pgdat = NODE_DATA(nid);
Johannes Weiner9109fb72008-07-23 21:27:20 -07007225 free_area_init_node(nid, NULL,
Mel Gormanc7132162006-09-27 01:49:43 -07007226 find_min_pfn_for_node(nid), NULL);
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07007227
7228 /* Any memory on that node */
7229 if (pgdat->node_present_pages)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08007230 node_set_state(nid, N_MEMORY);
7231 check_for_memory(pgdat, nid);
Mel Gormanc7132162006-09-27 01:49:43 -07007232 }
7233}
Mel Gorman2a1e2742007-07-17 04:03:12 -07007234
David Rientjesa5c6d652018-04-05 16:23:09 -07007235static int __init cmdline_parse_core(char *p, unsigned long *core,
7236 unsigned long *percent)
Mel Gorman2a1e2742007-07-17 04:03:12 -07007237{
7238 unsigned long long coremem;
David Rientjesa5c6d652018-04-05 16:23:09 -07007239 char *endptr;
7240
Mel Gorman2a1e2742007-07-17 04:03:12 -07007241 if (!p)
7242 return -EINVAL;
7243
David Rientjesa5c6d652018-04-05 16:23:09 -07007244 /* Value may be a percentage of total memory, otherwise bytes */
7245 coremem = simple_strtoull(p, &endptr, 0);
7246 if (*endptr == '%') {
7247 /* Paranoid check for percent values greater than 100 */
7248 WARN_ON(coremem > 100);
Mel Gorman2a1e2742007-07-17 04:03:12 -07007249
David Rientjesa5c6d652018-04-05 16:23:09 -07007250 *percent = coremem;
7251 } else {
7252 coremem = memparse(p, &p);
7253 /* Paranoid check that UL is enough for the coremem value */
7254 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
Mel Gorman2a1e2742007-07-17 04:03:12 -07007255
David Rientjesa5c6d652018-04-05 16:23:09 -07007256 *core = coremem >> PAGE_SHIFT;
7257 *percent = 0UL;
7258 }
Mel Gorman2a1e2742007-07-17 04:03:12 -07007259 return 0;
7260}
Mel Gormaned7ed362007-07-17 04:03:14 -07007261
Mel Gorman7e63efef2007-07-17 04:03:15 -07007262/*
7263 * kernelcore=size sets the amount of memory to be used for allocations that
7264 * cannot be reclaimed or migrated.
7265 */
7266static int __init cmdline_parse_kernelcore(char *p)
7267{
Taku Izumi342332e2016-03-15 14:55:22 -07007268 /* parse kernelcore=mirror */
7269 if (parse_option_str(p, "mirror")) {
7270 mirrored_kernelcore = true;
7271 return 0;
7272 }
7273
David Rientjesa5c6d652018-04-05 16:23:09 -07007274 return cmdline_parse_core(p, &required_kernelcore,
7275 &required_kernelcore_percent);
Mel Gorman7e63efef2007-07-17 04:03:15 -07007276}
7277
7278/*
7279 * movablecore=size sets the amount of memory to be used for allocations that
7280 * can be reclaimed or migrated.
7281 */
7282static int __init cmdline_parse_movablecore(char *p)
7283{
David Rientjesa5c6d652018-04-05 16:23:09 -07007284 return cmdline_parse_core(p, &required_movablecore,
7285 &required_movablecore_percent);
Mel Gorman7e63efef2007-07-17 04:03:15 -07007286}
7287
Mel Gormaned7ed362007-07-17 04:03:14 -07007288early_param("kernelcore", cmdline_parse_kernelcore);
Mel Gorman7e63efef2007-07-17 04:03:15 -07007289early_param("movablecore", cmdline_parse_movablecore);
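/*
 * Example boot parameters (illustrative):
 *   kernelcore=512M     - reserve 512 MiB for unmovable kernel allocations
 *   kernelcore=30%      - reserve 30% of total memory as kernelcore
 *   kernelcore=mirror   - keep kernel memory on mirrored ranges only
 *   movablecore=2G      - place 2 GiB into ZONE_MOVABLE
 */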
Mel Gormaned7ed362007-07-17 04:03:14 -07007290
Tejun Heo0ee332c2011-12-08 10:22:09 -08007291#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07007292
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07007293void adjust_managed_page_count(struct page *page, long count)
7294{
Arun KS9705bea2018-12-28 00:34:24 -08007295 atomic_long_add(count, &page_zone(page)->managed_pages);
Arun KSca79b0c2018-12-28 00:34:29 -08007296 totalram_pages_add(count);
Jiang Liu3dcc0572013-07-03 15:03:21 -07007297#ifdef CONFIG_HIGHMEM
7298 if (PageHighMem(page))
Arun KSca79b0c2018-12-28 00:34:29 -08007299 totalhigh_pages_add(count);
Jiang Liu3dcc0572013-07-03 15:03:21 -07007300#endif
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07007301}
Jiang Liu3dcc0572013-07-03 15:03:21 -07007302EXPORT_SYMBOL(adjust_managed_page_count);
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07007303
Alexey Dobriyane5cb1132018-12-28 00:36:03 -08007304unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
Jiang Liu69afade2013-04-29 15:06:21 -07007305{
Jiang Liu11199692013-07-03 15:02:48 -07007306 void *pos;
7307 unsigned long pages = 0;
Jiang Liu69afade2013-04-29 15:06:21 -07007308
Jiang Liu11199692013-07-03 15:02:48 -07007309 start = (void *)PAGE_ALIGN((unsigned long)start);
7310 end = (void *)((unsigned long)end & PAGE_MASK);
7311 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
Dave Hansen0d834322018-08-02 15:58:26 -07007312 struct page *page = virt_to_page(pos);
7313 void *direct_map_addr;
7314
7315 /*
7316 * 'direct_map_addr' might be different from 'pos'
7317		 * because virt_to_page() on some architectures
7318		 * works with aliases. Getting the direct map
7319 * address ensures that we get a _writeable_
7320 * alias for the memset().
7321 */
7322 direct_map_addr = page_address(page);
Jiang Liudbe67df2013-07-03 15:02:51 -07007323 if ((unsigned int)poison <= 0xFF)
Dave Hansen0d834322018-08-02 15:58:26 -07007324 memset(direct_map_addr, poison, PAGE_SIZE);
7325
7326 free_reserved_page(page);
Jiang Liu69afade2013-04-29 15:06:21 -07007327 }
7328
7329 if (pages && s)
Josh Poimboeufadb1fe92016-10-25 09:51:14 -05007330 pr_info("Freeing %s memory: %ldK\n",
7331 s, pages << (PAGE_SHIFT - 10));
Jiang Liu69afade2013-04-29 15:06:21 -07007332
7333 return pages;
7334}
Jiang Liu11199692013-07-03 15:02:48 -07007335EXPORT_SYMBOL(free_reserved_area);
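/*
 * Typical use (illustrative): architectures free their initmem with
 * something like
 *	free_reserved_area(&__init_begin, &__init_end,
 *			   POISON_FREE_INITMEM, "unused kernel");
 * which poisons, unreserves and returns those pages to the buddy allocator.
 */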
Jiang Liu69afade2013-04-29 15:06:21 -07007336
Jiang Liucfa11e02013-04-29 15:07:00 -07007337#ifdef CONFIG_HIGHMEM
7338void free_highmem_page(struct page *page)
7339{
7340 __free_reserved_page(page);
Arun KSca79b0c2018-12-28 00:34:29 -08007341 totalram_pages_inc();
Arun KS9705bea2018-12-28 00:34:24 -08007342 atomic_long_inc(&page_zone(page)->managed_pages);
Arun KSca79b0c2018-12-28 00:34:29 -08007343 totalhigh_pages_inc();
Jiang Liucfa11e02013-04-29 15:07:00 -07007344}
7345#endif
7346
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07007347
7348void __init mem_init_print_info(const char *str)
7349{
7350 unsigned long physpages, codesize, datasize, rosize, bss_size;
7351 unsigned long init_code_size, init_data_size;
7352
7353 physpages = get_num_physpages();
7354 codesize = _etext - _stext;
7355 datasize = _edata - _sdata;
7356 rosize = __end_rodata - __start_rodata;
7357 bss_size = __bss_stop - __bss_start;
7358 init_data_size = __init_end - __init_begin;
7359 init_code_size = _einittext - _sinittext;
7360
7361 /*
7362 * Detect special cases and adjust section sizes accordingly:
7363 * 1) .init.* may be embedded into .data sections
7364 * 2) .init.text.* may be out of [__init_begin, __init_end],
7365 * please refer to arch/tile/kernel/vmlinux.lds.S.
7366 * 3) .rodata.* may be embedded into .text or .data sections.
7367 */
7368#define adj_init_size(start, end, size, pos, adj) \
Pintu Kumarb8af2942013-09-11 14:20:34 -07007369 do { \
7370 if (start <= pos && pos < end && size > adj) \
7371 size -= adj; \
7372 } while (0)
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07007373
7374 adj_init_size(__init_begin, __init_end, init_data_size,
7375 _sinittext, init_code_size);
7376 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
7377 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
7378 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
7379 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
7380
7381#undef adj_init_size
7382
Joe Perches756a0252016-03-17 14:19:47 -07007383 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07007384#ifdef CONFIG_HIGHMEM
Joe Perches756a0252016-03-17 14:19:47 -07007385 ", %luK highmem"
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07007386#endif
Joe Perches756a0252016-03-17 14:19:47 -07007387 "%s%s)\n",
7388 nr_free_pages() << (PAGE_SHIFT - 10),
7389 physpages << (PAGE_SHIFT - 10),
7390 codesize >> 10, datasize >> 10, rosize >> 10,
7391 (init_data_size + init_code_size) >> 10, bss_size >> 10,
Arun KSca79b0c2018-12-28 00:34:29 -08007392 (physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
Joe Perches756a0252016-03-17 14:19:47 -07007393 totalcma_pages << (PAGE_SHIFT - 10),
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07007394#ifdef CONFIG_HIGHMEM
Arun KSca79b0c2018-12-28 00:34:29 -08007395 totalhigh_pages() << (PAGE_SHIFT - 10),
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07007396#endif
Joe Perches756a0252016-03-17 14:19:47 -07007397 str ? ", " : "", str ? str : "");
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07007398}
7399
Mel Gorman0e0b8642006-09-27 01:49:56 -07007400/**
Randy Dunlap88ca3b92006-10-04 02:15:25 -07007401 * set_dma_reserve - set the specified number of pages reserved in the first zone
7402 * @new_dma_reserve: The number of pages to mark reserved
Mel Gorman0e0b8642006-09-27 01:49:56 -07007403 *
Yaowei Bai013110a2015-09-08 15:04:10 -07007404 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
Mel Gorman0e0b8642006-09-27 01:49:56 -07007405 * In the DMA zone, a significant percentage may be consumed by the kernel image
7406 * and other unfreeable allocations which can skew the watermarks badly. This
Randy Dunlap88ca3b92006-10-04 02:15:25 -07007407 * function may optionally be used to account for unfreeable pages in the
7408 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
7409 * smaller per-cpu batchsize.
Mel Gorman0e0b8642006-09-27 01:49:56 -07007410 */
7411void __init set_dma_reserve(unsigned long new_dma_reserve)
7412{
7413 dma_reserve = new_dma_reserve;
7414}
7415
Linus Torvalds1da177e2005-04-16 15:20:36 -07007416void __init free_area_init(unsigned long *zones_size)
7417{
Pavel Tatashine181ae0c2018-07-14 09:15:07 -04007418 zero_resv_unavail();
Johannes Weiner9109fb72008-07-23 21:27:20 -07007419 free_area_init_node(0, zones_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007420 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
7421}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007422
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01007423static int page_alloc_cpu_dead(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007424{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007425
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01007426 lru_add_drain_cpu(cpu);
7427 drain_pages(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08007428
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01007429 /*
7430 * Spill the event counters of the dead processor
7431 * into the current processors event counters.
7432 * This artificially elevates the count of the current
7433 * processor.
7434 */
7435 vm_events_fold_cpu(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08007436
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01007437 /*
7438 * Zero the differential counters of the dead processor
7439 * so that the vm statistics are consistent.
7440 *
7441 * This is only okay since the processor is dead and cannot
7442 * race with what we are doing.
7443 */
7444 cpu_vm_stats_fold(cpu);
7445 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007446}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007447
7448void __init page_alloc_init(void)
7449{
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01007450 int ret;
7451
7452 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
7453 "mm/page_alloc:dead", NULL,
7454 page_alloc_cpu_dead);
7455 WARN_ON(ret < 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007456}
7457
7458/*
Yaowei Bai34b10062015-09-08 15:04:13 -07007459 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007460 * or min_free_kbytes changes.
7461 */
7462static void calculate_totalreserve_pages(void)
7463{
7464 struct pglist_data *pgdat;
7465 unsigned long reserve_pages = 0;
Christoph Lameter2f6726e2006-09-25 23:31:18 -07007466 enum zone_type i, j;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007467
7468 for_each_online_pgdat(pgdat) {
Mel Gorman281e3722016-07-28 15:46:11 -07007469
7470 pgdat->totalreserve_pages = 0;
7471
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007472 for (i = 0; i < MAX_NR_ZONES; i++) {
7473 struct zone *zone = pgdat->node_zones + i;
Mel Gorman3484b2d2014-08-06 16:07:14 -07007474 long max = 0;
Arun KS9705bea2018-12-28 00:34:24 -08007475 unsigned long managed_pages = zone_managed_pages(zone);
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007476
7477 /* Find valid and maximum lowmem_reserve in the zone */
7478 for (j = i; j < MAX_NR_ZONES; j++) {
7479 if (zone->lowmem_reserve[j] > max)
7480 max = zone->lowmem_reserve[j];
7481 }
7482
Mel Gorman41858962009-06-16 15:32:12 -07007483 /* we treat the high watermark as reserved pages. */
7484 max += high_wmark_pages(zone);
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007485
Arun KS3d6357d2018-12-28 00:34:20 -08007486 if (max > managed_pages)
7487 max = managed_pages;
Johannes Weinera8d01432016-01-14 15:20:15 -08007488
Mel Gorman281e3722016-07-28 15:46:11 -07007489 pgdat->totalreserve_pages += max;
Johannes Weinera8d01432016-01-14 15:20:15 -08007490
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007491 reserve_pages += max;
7492 }
7493 }
7494 totalreserve_pages = reserve_pages;
7495}
7496
7497/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07007498 * setup_per_zone_lowmem_reserve - called whenever
Yaowei Bai34b10062015-09-08 15:04:13 -07007499 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
Linus Torvalds1da177e2005-04-16 15:20:36 -07007500 * has a correct pages reserved value, so an adequate number of
7501 * pages are left in the zone after a successful __alloc_pages().
7502 */
7503static void setup_per_zone_lowmem_reserve(void)
7504{
7505 struct pglist_data *pgdat;
Christoph Lameter2f6726e2006-09-25 23:31:18 -07007506 enum zone_type j, idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007507
KAMEZAWA Hiroyukiec936fc2006-03-27 01:15:59 -08007508 for_each_online_pgdat(pgdat) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007509 for (j = 0; j < MAX_NR_ZONES; j++) {
7510 struct zone *zone = pgdat->node_zones + j;
Arun KS9705bea2018-12-28 00:34:24 -08007511 unsigned long managed_pages = zone_managed_pages(zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007512
7513 zone->lowmem_reserve[j] = 0;
7514
Christoph Lameter2f6726e2006-09-25 23:31:18 -07007515 idx = j;
7516 while (idx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007517 struct zone *lower_zone;
7518
Christoph Lameter2f6726e2006-09-25 23:31:18 -07007519 idx--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007520 lower_zone = pgdat->node_zones + idx;
Joonsoo Kimd3cda232018-04-10 16:30:11 -07007521
7522 if (sysctl_lowmem_reserve_ratio[idx] < 1) {
7523 sysctl_lowmem_reserve_ratio[idx] = 0;
7524 lower_zone->lowmem_reserve[j] = 0;
7525 } else {
7526 lower_zone->lowmem_reserve[j] =
7527 managed_pages / sysctl_lowmem_reserve_ratio[idx];
7528 }
Arun KS9705bea2018-12-28 00:34:24 -08007529 managed_pages += zone_managed_pages(lower_zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007530 }
7531 }
7532 }
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007533
7534 /* update totalreserve_pages */
7535 calculate_totalreserve_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007536}
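/*
 * Illustrative example (hypothetical sizes): with ZONE_NORMAL managing
 * 1,000,000 pages and sysctl_lowmem_reserve_ratio[ZONE_DMA32] = 256,
 * ZONE_DMA32 gets lowmem_reserve[ZONE_NORMAL] = 1000000 / 256 ~= 3906 pages,
 * i.e. GFP_KERNEL allocations falling back into DMA32 must leave roughly
 * that many extra pages free there.
 */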
7537
Mel Gormancfd3da12011-04-25 21:36:42 +00007538static void __setup_per_zone_wmarks(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007539{
7540 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
7541 unsigned long lowmem_pages = 0;
7542 struct zone *zone;
7543 unsigned long flags;
7544
7545 /* Calculate total number of !ZONE_HIGHMEM pages */
7546 for_each_zone(zone) {
7547 if (!is_highmem(zone))
Arun KS9705bea2018-12-28 00:34:24 -08007548 lowmem_pages += zone_managed_pages(zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007549 }
7550
7551 for_each_zone(zone) {
Andrew Mortonac924c62006-05-15 09:43:59 -07007552 u64 tmp;
7553
Gerald Schaefer1125b4e2008-10-18 20:27:11 -07007554 spin_lock_irqsave(&zone->lock, flags);
Arun KS9705bea2018-12-28 00:34:24 -08007555 tmp = (u64)pages_min * zone_managed_pages(zone);
Andrew Mortonac924c62006-05-15 09:43:59 -07007556 do_div(tmp, lowmem_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007557 if (is_highmem(zone)) {
7558 /*
Nick Piggin669ed172005-11-13 16:06:45 -08007559 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
7560 * need highmem pages, so cap pages_min to a small
7561 * value here.
7562 *
Mel Gorman41858962009-06-16 15:32:12 -07007563 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
Yaowei Bai42ff2702015-04-14 15:47:14 -07007564			 * deltas control async page reclaim, and so should
Nick Piggin669ed172005-11-13 16:06:45 -08007565 * not be capped for highmem.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007566 */
Andrew Morton90ae8d62013-02-22 16:32:22 -08007567 unsigned long min_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007568
Arun KS9705bea2018-12-28 00:34:24 -08007569 min_pages = zone_managed_pages(zone) / 1024;
Andrew Morton90ae8d62013-02-22 16:32:22 -08007570 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
Mel Gormana9214442018-12-28 00:35:44 -08007571 zone->_watermark[WMARK_MIN] = min_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007572 } else {
Nick Piggin669ed172005-11-13 16:06:45 -08007573 /*
7574 * If it's a lowmem zone, reserve a number of pages
Linus Torvalds1da177e2005-04-16 15:20:36 -07007575 * proportionate to the zone's size.
7576 */
Mel Gormana9214442018-12-28 00:35:44 -08007577 zone->_watermark[WMARK_MIN] = tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007578 }
7579
Johannes Weiner795ae7a2016-03-17 14:19:14 -07007580 /*
7581 * Set the kswapd watermarks distance according to the
7582 * scale factor in proportion to available memory, but
7583 * ensure a minimum size on small systems.
7584 */
7585 tmp = max_t(u64, tmp >> 2,
Arun KS9705bea2018-12-28 00:34:24 -08007586 mult_frac(zone_managed_pages(zone),
Johannes Weiner795ae7a2016-03-17 14:19:14 -07007587 watermark_scale_factor, 10000));
7588
Mel Gormana9214442018-12-28 00:35:44 -08007589 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
7590 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
Mel Gorman1c308442018-12-28 00:35:52 -08007591 zone->watermark_boost = 0;
Marek Szyprowski49f223a2012-01-25 12:49:24 +01007592
Gerald Schaefer1125b4e2008-10-18 20:27:11 -07007593 spin_unlock_irqrestore(&zone->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007594 }
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007595
7596 /* update totalreserve_pages */
7597 calculate_totalreserve_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007598}
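/*
 * Illustrative example (hypothetical sizes): with min_free_kbytes = 4096 and
 * 4 KiB pages, pages_min = 1024. A zone holding half of all lowmem gets
 * WMARK_MIN ~= 512 pages; with watermark_scale_factor = 10 the low and high
 * watermarks then sit at least 0.1% and 0.2% of the zone's managed pages
 * above WMARK_MIN, respectively.
 */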
7599
Mel Gormancfd3da12011-04-25 21:36:42 +00007600/**
7601 * setup_per_zone_wmarks - called when min_free_kbytes changes
7602 * or when memory is hot-{added|removed}
7603 *
7604 * Ensures that the watermark[min,low,high] values for each zone are set
7605 * correctly with respect to min_free_kbytes.
7606 */
7607void setup_per_zone_wmarks(void)
7608{
Michal Hockob93e0f32017-09-06 16:20:37 -07007609 static DEFINE_SPINLOCK(lock);
7610
7611 spin_lock(&lock);
Mel Gormancfd3da12011-04-25 21:36:42 +00007612 __setup_per_zone_wmarks();
Michal Hockob93e0f32017-09-06 16:20:37 -07007613 spin_unlock(&lock);
Mel Gormancfd3da12011-04-25 21:36:42 +00007614}
7615
Randy Dunlap55a44622009-09-21 17:01:20 -07007616/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07007617 * Initialise min_free_kbytes.
7618 *
7619 * For small machines we want it small (128k min). For large machines
7620 * we want it large (64MB max). But it is not linear, because network
7621 * bandwidth does not increase linearly with machine size. We use
7622 *
Pintu Kumarb8af2942013-09-11 14:20:34 -07007623 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007624 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
7625 *
7626 * which yields
7627 *
7628 * 16MB: 512k
7629 * 32MB: 724k
7630 * 64MB: 1024k
7631 * 128MB: 1448k
7632 * 256MB: 2048k
7633 * 512MB: 2896k
7634 * 1024MB: 4096k
7635 * 2048MB: 5792k
7636 * 4096MB: 8192k
7637 * 8192MB: 11584k
7638 * 16384MB: 16384k
7639 */
KOSAKI Motohiro1b79acc2011-05-24 17:11:32 -07007640int __meminit init_per_zone_wmark_min(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007641{
7642 unsigned long lowmem_kbytes;
Michal Hocko5f127332013-07-08 16:00:40 -07007643 int new_min_free_kbytes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007644
7645 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
Michal Hocko5f127332013-07-08 16:00:40 -07007646 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007647
Michal Hocko5f127332013-07-08 16:00:40 -07007648 if (new_min_free_kbytes > user_min_free_kbytes) {
7649 min_free_kbytes = new_min_free_kbytes;
7650 if (min_free_kbytes < 128)
7651 min_free_kbytes = 128;
7652 if (min_free_kbytes > 65536)
7653 min_free_kbytes = 65536;
7654 } else {
7655 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
7656 new_min_free_kbytes, user_min_free_kbytes);
7657 }
Minchan Kimbc75d332009-06-16 15:32:48 -07007658 setup_per_zone_wmarks();
KOSAKI Motohiroa6cccdc2011-05-24 17:11:33 -07007659 refresh_zone_stat_thresholds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007660 setup_per_zone_lowmem_reserve();
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007661
7662#ifdef CONFIG_NUMA
7663 setup_min_unmapped_ratio();
7664 setup_min_slab_ratio();
7665#endif
7666
Linus Torvalds1da177e2005-04-16 15:20:36 -07007667 return 0;
7668}
Jason Baronbc22af742016-05-05 16:22:12 -07007669core_initcall(init_per_zone_wmark_min)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007670
7671/*
Pintu Kumarb8af2942013-09-11 14:20:34 -07007672 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
Linus Torvalds1da177e2005-04-16 15:20:36 -07007673 * that we can call two helper functions whenever min_free_kbytes
7674 * changes.
7675 */
Joe Perchescccad5b2014-06-06 14:38:09 -07007676int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007677 void __user *buffer, size_t *length, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007678{
Han Pingtianda8c7572014-01-23 15:53:17 -08007679 int rc;
7680
7681 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7682 if (rc)
7683 return rc;
7684
Michal Hocko5f127332013-07-08 16:00:40 -07007685 if (write) {
7686 user_min_free_kbytes = min_free_kbytes;
Minchan Kimbc75d332009-06-16 15:32:48 -07007687 setup_per_zone_wmarks();
Michal Hocko5f127332013-07-08 16:00:40 -07007688 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007689 return 0;
7690}
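/*
 * Example (illustrative): "echo 65536 > /proc/sys/vm/min_free_kbytes" raises
 * the reserve to 64 MiB and immediately recomputes the per-zone watermarks
 * via setup_per_zone_wmarks().
 */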
7691
Mel Gorman1c308442018-12-28 00:35:52 -08007692int watermark_boost_factor_sysctl_handler(struct ctl_table *table, int write,
7693 void __user *buffer, size_t *length, loff_t *ppos)
7694{
7695 int rc;
7696
7697 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7698 if (rc)
7699 return rc;
7700
7701 return 0;
7702}
7703
Johannes Weiner795ae7a2016-03-17 14:19:14 -07007704int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
7705 void __user *buffer, size_t *length, loff_t *ppos)
7706{
7707 int rc;
7708
7709 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7710 if (rc)
7711 return rc;
7712
7713 if (write)
7714 setup_per_zone_wmarks();
7715
7716 return 0;
7717}
7718
Christoph Lameter96146342006-07-03 00:24:13 -07007719#ifdef CONFIG_NUMA
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007720static void setup_min_unmapped_ratio(void)
Christoph Lameter96146342006-07-03 00:24:13 -07007721{
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007722 pg_data_t *pgdat;
Christoph Lameter96146342006-07-03 00:24:13 -07007723 struct zone *zone;
Christoph Lameter96146342006-07-03 00:24:13 -07007724
Mel Gormana5f5f912016-07-28 15:46:32 -07007725 for_each_online_pgdat(pgdat)
Joonsoo Kim81cbcbc2016-08-10 16:27:46 -07007726 pgdat->min_unmapped_pages = 0;
Mel Gormana5f5f912016-07-28 15:46:32 -07007727
Christoph Lameter96146342006-07-03 00:24:13 -07007728 for_each_zone(zone)
Arun KS9705bea2018-12-28 00:34:24 -08007729 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
7730 sysctl_min_unmapped_ratio) / 100;
Christoph Lameter96146342006-07-03 00:24:13 -07007731}
Christoph Lameter0ff38492006-09-25 23:31:52 -07007732
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007733
7734int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007735 void __user *buffer, size_t *length, loff_t *ppos)
Christoph Lameter0ff38492006-09-25 23:31:52 -07007736{
Christoph Lameter0ff38492006-09-25 23:31:52 -07007737 int rc;
7738
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007739 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
Christoph Lameter0ff38492006-09-25 23:31:52 -07007740 if (rc)
7741 return rc;
7742
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007743 setup_min_unmapped_ratio();
7744
7745 return 0;
7746}
7747
7748static void setup_min_slab_ratio(void)
7749{
7750 pg_data_t *pgdat;
7751 struct zone *zone;
7752
Mel Gormana5f5f912016-07-28 15:46:32 -07007753 for_each_online_pgdat(pgdat)
7754 pgdat->min_slab_pages = 0;
7755
Christoph Lameter0ff38492006-09-25 23:31:52 -07007756 for_each_zone(zone)
Arun KS9705bea2018-12-28 00:34:24 -08007757 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
7758 sysctl_min_slab_ratio) / 100;
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007759}
7760
7761int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
7762 void __user *buffer, size_t *length, loff_t *ppos)
7763{
7764 int rc;
7765
7766 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7767 if (rc)
7768 return rc;
7769
7770 setup_min_slab_ratio();
7771
Christoph Lameter0ff38492006-09-25 23:31:52 -07007772 return 0;
7773}
Christoph Lameter96146342006-07-03 00:24:13 -07007774#endif
7775
Linus Torvalds1da177e2005-04-16 15:20:36 -07007776/*
7777 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
7778 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
7779 * whenever sysctl_lowmem_reserve_ratio changes.
7780 *
7781 * The reserve ratio obviously has absolutely no relation with the
Mel Gorman41858962009-06-16 15:32:12 -07007782 * minimum watermarks. The lowmem reserve ratio can only make sense
Linus Torvalds1da177e2005-04-16 15:20:36 -07007783 * as a function of the boot-time zone sizes.
7784 */
Joe Perchescccad5b2014-06-06 14:38:09 -07007785int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007786 void __user *buffer, size_t *length, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007787{
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007788 proc_dointvec_minmax(table, write, buffer, length, ppos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007789 setup_per_zone_lowmem_reserve();
7790 return 0;
7791}
7792
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08007793/*
7794 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
Pintu Kumarb8af2942013-09-11 14:20:34 -07007795 * cpu. It is the fraction of total pages in each zone that a hot per-cpu
7796 * pagelist can have before it gets flushed back to the buddy allocator.
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08007797 */
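/*
 * Example (illustrative): with percpu_pagelist_fraction = 8, each per-cpu
 * pagelist of a zone managing 1,000,000 pages may grow to roughly
 * 1000000 / 8 = 125000 pages before being drained back to the buddy
 * allocator; writing 0 restores the default batch-based sizing.
 */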
Joe Perchescccad5b2014-06-06 14:38:09 -07007798int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007799 void __user *buffer, size_t *length, loff_t *ppos)
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08007800{
7801 struct zone *zone;
David Rientjes7cd2b0a2014-06-23 13:22:04 -07007802 int old_percpu_pagelist_fraction;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08007803 int ret;
7804
Cody P Schaferc8e251f2013-07-03 15:01:29 -07007805 mutex_lock(&pcp_batch_high_lock);
David Rientjes7cd2b0a2014-06-23 13:22:04 -07007806 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
7807
7808 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
7809 if (!write || ret < 0)
7810 goto out;
7811
7812 /* Sanity checking to avoid pcp imbalance */
7813 if (percpu_pagelist_fraction &&
7814 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
7815 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
7816 ret = -EINVAL;
7817 goto out;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08007818 }
David Rientjes7cd2b0a2014-06-23 13:22:04 -07007819
7820 /* No change? */
7821 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
7822 goto out;
7823
7824 for_each_populated_zone(zone) {
7825 unsigned int cpu;
7826
7827 for_each_possible_cpu(cpu)
7828 pageset_set_high_and_batch(zone,
7829 per_cpu_ptr(zone->pageset, cpu));
7830 }
7831out:
Cody P Schaferc8e251f2013-07-03 15:01:29 -07007832 mutex_unlock(&pcp_batch_high_lock);
David Rientjes7cd2b0a2014-06-23 13:22:04 -07007833 return ret;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08007834}
7835
Rasmus Villemoesa9919c72015-06-24 16:56:28 -07007836#ifdef CONFIG_NUMA
David S. Millerf034b5d2006-08-24 03:08:07 -07007837int hashdist = HASHDIST_DEFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007838
Linus Torvalds1da177e2005-04-16 15:20:36 -07007839static int __init set_hashdist(char *str)
7840{
7841 if (!str)
7842 return 0;
7843 hashdist = simple_strtoul(str, &str, 0);
7844 return 1;
7845}
7846__setup("hashdist=", set_hashdist);
7847#endif
7848
Srikar Dronamrajuf6f34b42016-10-07 16:59:15 -07007849#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
7850/*
7851 * Returns the number of pages that the arch has reserved but
7852 * that are not known to alloc_large_system_hash().
7853 */
7854static unsigned long __init arch_reserved_kernel_pages(void)
7855{
7856 return 0;
7857}
7858#endif
7859
Linus Torvalds1da177e2005-04-16 15:20:36 -07007860/*
Pavel Tatashin90172172017-07-06 15:39:14 -07007861 * Adaptive scale is meant to reduce the sizes of hash tables on large memory
7862 * machines. As memory size is increased the scale is also increased but at a
7863 * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory
7864 * quadruples the scale is increased by one, which means the size of hash table
7865 * only doubles, instead of quadrupling as well.
7866 * Because 32-bit systems cannot have large physical memory, where this scaling
7867 * makes sense, it is disabled on such platforms.
7868 */
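/*
 * Illustrative example: with ADAPT_SCALE_BASE = 64 GiB, a 128 GiB machine
 * increments the scale once and a 512 GiB machine twice, so each quadrupling
 * of memory beyond 64 GiB only doubles the resulting hash table instead of
 * quadrupling it.
 */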
7869#if __BITS_PER_LONG > 32
7870#define ADAPT_SCALE_BASE (64ul << 30)
7871#define ADAPT_SCALE_SHIFT 2
7872#define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT)
7873#endif
7874
7875/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07007876 * allocate a large system hash table from bootmem
7877 * - it is assumed that the hash table must contain an exact power-of-2
7878 * quantity of entries
7879 * - limit is the number of hash buckets, not the total allocation size
7880 */
7881void *__init alloc_large_system_hash(const char *tablename,
7882 unsigned long bucketsize,
7883 unsigned long numentries,
7884 int scale,
7885 int flags,
7886 unsigned int *_hash_shift,
7887 unsigned int *_hash_mask,
Tim Bird31fe62b2012-05-23 13:33:35 +00007888 unsigned long low_limit,
7889 unsigned long high_limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007890{
Tim Bird31fe62b2012-05-23 13:33:35 +00007891 unsigned long long max = high_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007892 unsigned long log2qty, size;
7893 void *table = NULL;
Pavel Tatashin3749a8f2017-07-06 15:39:08 -07007894 gfp_t gfp_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007895
7896 /* allow the kernel cmdline to have a say */
7897 if (!numentries) {
7898 /* round applicable memory size up to nearest megabyte */
Andrew Morton04903662006-12-06 20:37:33 -08007899 numentries = nr_kernel_pages;
Srikar Dronamrajuf6f34b42016-10-07 16:59:15 -07007900 numentries -= arch_reserved_kernel_pages();
Jerry Zhoua7e83312013-09-11 14:20:26 -07007901
7902 /* It isn't necessary when PAGE_SIZE >= 1MB */
7903 if (PAGE_SHIFT < 20)
7904 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007905
Pavel Tatashin90172172017-07-06 15:39:14 -07007906#if __BITS_PER_LONG > 32
7907 if (!high_limit) {
7908 unsigned long adapt;
7909
7910 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
7911 adapt <<= ADAPT_SCALE_SHIFT)
7912 scale++;
7913 }
7914#endif
7915
Linus Torvalds1da177e2005-04-16 15:20:36 -07007916 /* limit to 1 bucket per 2^scale bytes of low memory */
7917 if (scale > PAGE_SHIFT)
7918 numentries >>= (scale - PAGE_SHIFT);
7919 else
7920 numentries <<= (PAGE_SHIFT - scale);
Paul Mundt9ab37b82007-01-05 16:36:30 -08007921
7922 /* Make sure we've got at least a 0-order allocation.. */
Jan Beulich2c85f512009-09-21 17:03:07 -07007923 if (unlikely(flags & HASH_SMALL)) {
7924 /* Makes no sense without HASH_EARLY */
7925 WARN_ON(!(flags & HASH_EARLY));
7926 if (!(numentries >> *_hash_shift)) {
7927 numentries = 1UL << *_hash_shift;
7928 BUG_ON(!numentries);
7929 }
7930 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
Paul Mundt9ab37b82007-01-05 16:36:30 -08007931 numentries = PAGE_SIZE / bucketsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007932 }
John Hawkes6e692ed2006-03-25 03:08:02 -08007933 numentries = roundup_pow_of_two(numentries);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007934
7935 /* limit allocation size to 1/16 total memory by default */
7936 if (max == 0) {
7937 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
7938 do_div(max, bucketsize);
7939 }
Dimitri Sivanich074b8512012-02-08 12:39:07 -08007940 max = min(max, 0x80000000ULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007941
Tim Bird31fe62b2012-05-23 13:33:35 +00007942 if (numentries < low_limit)
7943 numentries = low_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007944 if (numentries > max)
7945 numentries = max;
7946
David Howellsf0d1b0b2006-12-08 02:37:49 -08007947 log2qty = ilog2(numentries);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007948
Pavel Tatashin3749a8f2017-07-06 15:39:08 -07007949 gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007950 do {
7951 size = bucketsize << log2qty;
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08007952 if (flags & HASH_EARLY) {
7953 if (flags & HASH_ZERO)
Mike Rapoport7e1c4e22018-10-30 15:09:57 -07007954 table = memblock_alloc_nopanic(size,
7955 SMP_CACHE_BYTES);
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08007956 else
Mike Rapoport7e1c4e22018-10-30 15:09:57 -07007957 table = memblock_alloc_raw(size,
7958 SMP_CACHE_BYTES);
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08007959 } else if (hashdist) {
Pavel Tatashin3749a8f2017-07-06 15:39:08 -07007960 table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08007961 } else {
Eric Dumazet1037b832007-07-15 23:38:05 -07007962 /*
7963			 * If bucketsize is not a power of two, we may free
Mel Gormana1dd2682009-06-16 15:32:19 -07007964			 * some pages at the end of the hash table, which
7965			 * alloc_pages_exact() does automatically
Eric Dumazet1037b832007-07-15 23:38:05 -07007966 */
Catalin Marinas264ef8a2009-07-07 10:33:01 +01007967 if (get_order(size) < MAX_ORDER) {
Pavel Tatashin3749a8f2017-07-06 15:39:08 -07007968 table = alloc_pages_exact(size, gfp_flags);
7969 kmemleak_alloc(table, size, 1, gfp_flags);
Catalin Marinas264ef8a2009-07-07 10:33:01 +01007970 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007971 }
7972 } while (!table && size > PAGE_SIZE && --log2qty);
7973
7974 if (!table)
7975 panic("Failed to allocate %s hash table\n", tablename);
7976
Joe Perches11705322016-03-17 14:19:50 -07007977 pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
7978 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007979
7980 if (_hash_shift)
7981 *_hash_shift = log2qty;
7982 if (_hash_mask)
7983 *_hash_mask = (1 << log2qty) - 1;
7984
7985 return table;
7986}
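/*
 * Typical use (illustrative, see e.g. fs/dcache.c): the dentry hash is sized
 * with roughly
 *	dentry_hashtable = alloc_large_system_hash("Dentry cache",
 *					sizeof(struct hlist_bl_head),
 *					dhash_entries, 13,
 *					HASH_EARLY | HASH_ZERO,
 *					&d_hash_shift, NULL, 0, 0);
 * i.e. one bucket per 2^13 bytes of low memory unless overridden on the
 * command line.
 */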
KAMEZAWA Hiroyukia117e662006-03-27 01:15:25 -08007987
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07007988/*
Minchan Kim80934512012-07-31 16:43:01 -07007989 * This function checks whether the pageblock includes unmovable pages or not.
7990 * If @count is not zero, it is okay to include up to @count unmovable pages.
7991 *
Pintu Kumarb8af2942013-09-11 14:20:34 -07007992 * A PageLRU check without isolation or the lru_lock could race, so a
Yisheng Xie0efadf42017-02-24 14:57:39 -08007993 * MIGRATE_MOVABLE block might include unmovable pages. A __PageMovable
7994 * check without lock_page may likewise miss some movable non-LRU pages
7995 * under racy conditions. So you can't expect this function to be exact.
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07007996 */
Wen Congyangb023f462012-12-11 16:00:45 -08007997bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
Michal Hockod381c542018-12-28 00:33:56 -08007998 int migratetype, int flags)
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007999{
8000 unsigned long pfn, iter, found;
Michal Nazarewicz47118af2011-12-29 13:09:50 +01008001
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008002 /*
Michal Hocko15c30bc2018-05-25 14:47:42 -07008003 * TODO we could make this much more efficient by not checking every
8004 * page in the range if we know all of them are in MOVABLE_ZONE and
8005	 * that the movable zone guarantees that pages are migratable, but
8006	 * the latter is not the case right now, unfortunately. E.g. movablecore
8007 * can still lead to having bootmem allocations in zone_movable.
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008008 */
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008009
Michal Hocko4da2ce22017-11-15 17:33:26 -08008010 /*
8011 * CMA allocations (alloc_contig_range) really need to mark isolate
8012 * CMA pageblocks even when they are not movable in fact so consider
8013 * them movable here.
8014 */
8015 if (is_migrate_cma(migratetype) &&
8016 is_migrate_cma(get_pageblock_migratetype(page)))
8017 return false;
8018
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008019 pfn = page_to_pfn(page);
8020 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
8021 unsigned long check = pfn + iter;
8022
Namhyung Kim29723fc2011-02-25 14:44:25 -08008023 if (!pfn_valid_within(check))
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008024 continue;
Namhyung Kim29723fc2011-02-25 14:44:25 -08008025
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008026 page = pfn_to_page(check);
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07008027
Michal Hockod7ab3672017-11-15 17:33:30 -08008028 if (PageReserved(page))
Michal Hocko15c30bc2018-05-25 14:47:42 -07008029 goto unmovable;
Michal Hockod7ab3672017-11-15 17:33:30 -08008030
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07008031 /*
Michal Hocko9d789992018-11-16 15:08:15 -08008032 * If the zone is movable and we have ruled out all reserved
8033 * pages then it should be reasonably safe to assume the rest
8034 * is movable.
8035 */
8036 if (zone_idx(zone) == ZONE_MOVABLE)
8037 continue;
8038
8039 /*
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07008040 * Hugepages are not in LRU lists, but they're movable.
8041		 * We need not scan over tail pages because we don't
8042 * handle each tail page individually in migration.
8043 */
8044 if (PageHuge(page)) {
Oscar Salvador17e2e7d2018-12-21 14:31:00 -08008045 struct page *head = compound_head(page);
8046 unsigned int skip_pages;
Aneesh Kumar K.V464c7ff2018-09-04 15:45:59 -07008047
Oscar Salvador17e2e7d2018-12-21 14:31:00 -08008048 if (!hugepage_migration_supported(page_hstate(head)))
Aneesh Kumar K.V464c7ff2018-09-04 15:45:59 -07008049 goto unmovable;
8050
Oscar Salvador17e2e7d2018-12-21 14:31:00 -08008051 skip_pages = (1 << compound_order(head)) - (page - head);
8052 iter += skip_pages - 1;
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07008053 continue;
8054 }
8055
Minchan Kim97d255c2012-07-31 16:42:59 -07008056 /*
8057		 * We can't use page_count without pinning a page
8058		 * because another CPU can free the compound page.
8059		 * This check already skips compound tails of THP
Joonsoo Kim0139aa72016-05-19 17:10:49 -07008060		 * because their page->_refcount is zero at all times.
Minchan Kim97d255c2012-07-31 16:42:59 -07008061 */
Joonsoo Kimfe896d12016-03-17 14:19:26 -07008062 if (!page_ref_count(page)) {
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008063 if (PageBuddy(page))
8064 iter += (1 << page_order(page)) - 1;
8065 continue;
8066 }
Minchan Kim97d255c2012-07-31 16:42:59 -07008067
Wen Congyangb023f462012-12-11 16:00:45 -08008068 /*
8069		 * The HWPoisoned page may not be in the buddy system, and
8070 * page_count() is not 0.
8071 */
Michal Hockod381c542018-12-28 00:33:56 -08008072 if ((flags & SKIP_HWPOISON) && PageHWPoison(page))
Wen Congyangb023f462012-12-11 16:00:45 -08008073 continue;
8074
Yisheng Xie0efadf42017-02-24 14:57:39 -08008075 if (__PageMovable(page))
8076 continue;
8077
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008078 if (!PageLRU(page))
8079 found++;
8080 /*
Johannes Weiner6b4f7792014-12-12 16:56:13 -08008081		 * If there are RECLAIMABLE pages, we need to check
8082		 * them. But for now, memory offline itself doesn't call
8083		 * shrink_node_slabs(), and this still needs to be fixed.
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008084 */
8085 /*
8086		 * If the page is not RAM, page_count() should be 0.
8087		 * We don't need any further checks. This is a _used_ non-movable page.
8088		 *
8089		 * The problematic thing here is PG_reserved pages. PG_reserved
8090		 * is set on both a memory hole page and a _used_ kernel
8091		 * page at boot.
8092 */
8093 if (found > count)
Michal Hocko15c30bc2018-05-25 14:47:42 -07008094 goto unmovable;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008095 }
Minchan Kim80934512012-07-31 16:43:01 -07008096 return false;
Michal Hocko15c30bc2018-05-25 14:47:42 -07008097unmovable:
8098 WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
Michal Hockod381c542018-12-28 00:33:56 -08008099 if (flags & REPORT_FAILURE)
8100 dump_page(pfn_to_page(pfn+iter), "unmovable page");
Michal Hocko15c30bc2018-05-25 14:47:42 -07008101 return true;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07008102}
8103
Vlastimil Babka080fe202016-02-05 15:36:41 -08008104#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008105
8106static unsigned long pfn_max_align_down(unsigned long pfn)
8107{
8108 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
8109 pageblock_nr_pages) - 1);
8110}
8111
8112static unsigned long pfn_max_align_up(unsigned long pfn)
8113{
8114 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
8115 pageblock_nr_pages));
8116}
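/*
 * Example (illustrative): with MAX_ORDER_NR_PAGES = 2048 and 512-page
 * pageblocks, the alignment is 2048 pages, so pfn_max_align_down(10000)
 * returns 8192 and pfn_max_align_up(10000) returns 10240.
 */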
8117
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008118/* [start, end) must belong to a single zone. */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008119static int __alloc_contig_migrate_range(struct compact_control *cc,
8120 unsigned long start, unsigned long end)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008121{
8122 /* This function is based on compact_zone() from compaction.c. */
Minchan Kimbeb51ea2012-10-08 16:33:51 -07008123 unsigned long nr_reclaimed;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008124 unsigned long pfn = start;
8125 unsigned int tries = 0;
8126 int ret = 0;
8127
Marek Szyprowskibe49a6e2012-12-12 13:51:19 -08008128 migrate_prep();
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008129
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008130 while (pfn < end || !list_empty(&cc->migratepages)) {
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008131 if (fatal_signal_pending(current)) {
8132 ret = -EINTR;
8133 break;
8134 }
8135
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008136 if (list_empty(&cc->migratepages)) {
8137 cc->nr_migratepages = 0;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07008138 pfn = isolate_migratepages_range(cc, pfn, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008139 if (!pfn) {
8140 ret = -EINTR;
8141 break;
8142 }
8143 tries = 0;
8144 } else if (++tries == 5) {
8145 ret = ret < 0 ? ret : -EBUSY;
8146 break;
8147 }
8148
Minchan Kimbeb51ea2012-10-08 16:33:51 -07008149 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
8150 &cc->migratepages);
8151 cc->nr_migratepages -= nr_reclaimed;
Minchan Kim02c6de82012-10-08 16:31:55 -07008152
Hugh Dickins9c620e22013-02-22 16:35:14 -08008153 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
Anshuman Khandual31025352018-04-05 16:22:08 -07008154 NULL, 0, cc->mode, MR_CONTIG_RANGE);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008155 }
Srinivas Pandruvada2a6f5122013-02-22 16:32:09 -08008156 if (ret < 0) {
8157 putback_movable_pages(&cc->migratepages);
8158 return ret;
8159 }
8160 return 0;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008161}
8162
8163/**
8164 * alloc_contig_range() -- tries to allocate given range of pages
8165 * @start: start PFN to allocate
8166 * @end: one-past-the-last PFN to allocate
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02008167 * @migratetype: migratetype of the underlying pageblocks (either
8168 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
8169 * in range must have the same migratetype and it must
8170 * be either of the two.
Lucas Stachca96b622017-02-24 14:58:37 -08008171 * @gfp_mask: GFP mask to use during compaction
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008172 *
8173 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
Mike Kravetz2c7452a2018-04-05 16:25:26 -07008174 * aligned. The PFN range must belong to a single zone.
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008175 *
Mike Kravetz2c7452a2018-04-05 16:25:26 -07008176 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
8177 * pageblocks in the range. Once isolated, the pageblocks should not
8178 * be modified by others.
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008179 *
8180 * Returns zero on success or a negative error code.  On success all
8181 * pages whose PFN is in [start, end) are allocated for the caller and
8182 * need to be freed with free_contig_range().
8183 */
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02008184int alloc_contig_range(unsigned long start, unsigned long end,
Lucas Stachca96b622017-02-24 14:58:37 -08008185 unsigned migratetype, gfp_t gfp_mask)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008186{
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008187 unsigned long outer_start, outer_end;
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08008188 unsigned int order;
8189 int ret = 0;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008190
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008191 struct compact_control cc = {
8192 .nr_migratepages = 0,
8193 .order = -1,
8194 .zone = page_zone(pfn_to_page(start)),
David Rientjese0b9dae2014-06-04 16:08:28 -07008195 .mode = MIGRATE_SYNC,
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008196 .ignore_skip_hint = true,
Vlastimil Babka2583d672017-11-17 15:26:38 -08008197 .no_set_skip_hint = true,
Michal Hocko7dea19f2017-05-03 14:53:15 -07008198 .gfp_mask = current_gfp_context(gfp_mask),
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008199 };
8200 INIT_LIST_HEAD(&cc.migratepages);
8201
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008202 /*
8203	 * What we do here is mark all pageblocks in the range as
8204	 * MIGRATE_ISOLATE. Because pageblock and max order pages may
8205	 * have different sizes, and due to the way the page allocator
8206	 * works, we align the range to the biggest of the two so
8207	 * that the page allocator won't try to merge buddies from
8208	 * different pageblocks and change MIGRATE_ISOLATE to some
8209	 * other migration type.
8210	 *
8211	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
8212	 * migrate the pages from an unaligned range (i.e. pages that
8213	 * we are interested in). This will put all the pages in
8214	 * the range back to the page allocator as MIGRATE_ISOLATE.
8215	 *
8216	 * When this is done, we take the pages in the range from the
8217	 * page allocator, removing them from the buddy system. This way
8218	 * the page allocator will never consider using them.
8219	 *
8220	 * This lets us mark the pageblocks back as
8221	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
8222	 * aligned range but not in the unaligned, original range are
8223	 * put back to the page allocator for the buddy system to use.
8224 */
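	/*
	 * Illustrative walk-through with assumed numbers (1024-page
	 * alignment): a request for [0x12345, 0x12360) isolates the
	 * pageblocks covering [0x12000, 0x12400), migrates whatever is
	 * mapped in [0x12345, 0x12360), pulls the now-free pages out of
	 * the buddy lists, frees back anything grabbed outside the
	 * requested sub-range, and finally un-isolates [0x12000, 0x12400).
	 */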
8225
8226 ret = start_isolate_page_range(pfn_max_align_down(start),
Michal Hockod381c542018-12-28 00:33:56 -08008227 pfn_max_align_up(end), migratetype, 0);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008228 if (ret)
Bob Liu86a595f2012-10-25 13:37:56 -07008229 return ret;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008230
Joonsoo Kim8ef58492016-01-14 15:18:45 -08008231 /*
8232	 * In case of -EBUSY, we'd like to know which page causes the problem.
Mike Kravetz63cd4482017-11-29 16:10:01 -08008233 * So, just fall through. test_pages_isolated() has a tracepoint
8234 * which will report the busy page.
8235 *
8236 * It is possible that busy pages could become available before
8237 * the call to test_pages_isolated, and the range will actually be
8238	 * allocated.  So, if we fall through, be sure to clear ret so that
8239	 * -EBUSY is not accidentally used or returned to the caller.
Joonsoo Kim8ef58492016-01-14 15:18:45 -08008240 */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008241 ret = __alloc_contig_migrate_range(&cc, start, end);
Joonsoo Kim8ef58492016-01-14 15:18:45 -08008242 if (ret && ret != -EBUSY)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008243 goto done;
Mike Kravetz63cd4482017-11-29 16:10:01 -08008244	ret = 0;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008245
8246 /*
8247 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
8248 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
8249	 * more, all pages in [start, end) are free in the page allocator.
8250	 * What we are going to do is allocate all pages from
8251	 * [start, end) (that is, remove them from the page allocator).
8252	 *
8253	 * The only problem is that pages at the beginning and at the
8254	 * end of the interesting range may not be aligned with pages that
8255	 * the page allocator holds, i.e. they can be part of higher order
8256	 * pages.  Because of this, we reserve the bigger range and
8257	 * once this is done, free the pages we are not interested in.
8258	 *
8259	 * We don't have to hold zone->lock here because the pages are
8260	 * isolated and thus won't get removed from the buddy system.
8261 */
8262
8263 lru_add_drain_all();
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008264
8265 order = 0;
8266 outer_start = start;
8267 while (!PageBuddy(pfn_to_page(outer_start))) {
8268 if (++order >= MAX_ORDER) {
Joonsoo Kim8ef58492016-01-14 15:18:45 -08008269 outer_start = start;
8270 break;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008271 }
8272 outer_start &= ~0UL << order;
8273 }
8274
Joonsoo Kim8ef58492016-01-14 15:18:45 -08008275 if (outer_start != start) {
8276 order = page_order(pfn_to_page(outer_start));
8277
8278 /*
8279		 * The outer_start page could be a small order buddy page that
8280		 * doesn't include the start page. Adjust outer_start
8281		 * in this case so the failed page is reported properly
8282		 * by the tracepoint in test_pages_isolated().
8283 */
8284 if (outer_start + (1UL << order) <= start)
8285 outer_start = start;
8286 }
8287
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008288 /* Make sure the range is really isolated. */
Wen Congyangb023f462012-12-11 16:00:45 -08008289 if (test_pages_isolated(outer_start, end, false)) {
Jonathan Toppins75dddef2017-08-10 15:23:35 -07008290 pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
Michal Nazarewiczdae803e2014-11-13 15:19:27 -08008291 __func__, outer_start, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008292 ret = -EBUSY;
8293 goto done;
8294 }
8295
Marek Szyprowski49f223a2012-01-25 12:49:24 +01008296 /* Grab isolated pages from freelists. */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07008297 outer_end = isolate_freepages_range(&cc, outer_start, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008298 if (!outer_end) {
8299 ret = -EBUSY;
8300 goto done;
8301 }
8302
8303 /* Free head and tail (if any) */
8304 if (start != outer_start)
8305 free_contig_range(outer_start, start - outer_start);
8306 if (end != outer_end)
8307 free_contig_range(end, outer_end - end);
8308
8309done:
8310 undo_isolate_page_range(pfn_max_align_down(start),
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02008311 pfn_max_align_up(end), migratetype);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008312 return ret;
8313}
8314
8315void free_contig_range(unsigned long pfn, unsigned nr_pages)
8316{
Marek Szyprowskibcc2b022012-12-20 15:05:18 -08008317 unsigned int count = 0;
8318
8319 for (; nr_pages--; pfn++) {
8320 struct page *page = pfn_to_page(pfn);
8321
8322 count += page_count(page) != 1;
8323 __free_page(page);
8324 }
8325 WARN(count != 0, "%d pages are still in use!\n", count);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01008326}
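/*
 * Minimal usage sketch for the two interfaces above, assuming a caller
 * (e.g. a CMA-style allocator) whose pageblocks are already MIGRATE_CMA;
 * the helper name and the PFN handling are illustrative assumptions only.
 */
#if 0
static int example_grab_contig_range(unsigned long start_pfn,
				     unsigned int nr_pages)
{
	int ret;

	ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
				 MIGRATE_CMA, GFP_KERNEL);
	if (ret)
		return ret;	/* e.g. -EBUSY or -EINTR */

	/* ... use the pages starting at pfn_to_page(start_pfn) ... */

	free_contig_range(start_pfn, nr_pages);
	return 0;
}
#endif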
8327#endif
8328
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09008329#ifdef CONFIG_MEMORY_HOTPLUG
Cody P Schafer0a647f32013-07-03 15:01:33 -07008330/*
8331 * The zone indicated has a new number of managed_pages; batch sizes and percpu
8332 * page high values need to be recalulated.
8333 * page high values need to be recalculated.
Jiang Liu4ed7e022012-07-31 16:43:35 -07008334void __meminit zone_pcp_update(struct zone *zone)
8335{
Cody P Schafer0a647f32013-07-03 15:01:33 -07008336 unsigned cpu;
Cody P Schaferc8e251f2013-07-03 15:01:29 -07008337 mutex_lock(&pcp_batch_high_lock);
Cody P Schafer0a647f32013-07-03 15:01:33 -07008338 for_each_possible_cpu(cpu)
Cody P Schafer169f6c12013-07-03 15:01:41 -07008339 pageset_set_high_and_batch(zone,
8340 per_cpu_ptr(zone->pageset, cpu));
Cody P Schaferc8e251f2013-07-03 15:01:29 -07008341 mutex_unlock(&pcp_batch_high_lock);
Jiang Liu4ed7e022012-07-31 16:43:35 -07008342}
8343#endif
8344
Jiang Liu340175b2012-07-31 16:43:32 -07008345void zone_pcp_reset(struct zone *zone)
8346{
8347 unsigned long flags;
Minchan Kim5a883812012-10-08 16:33:39 -07008348 int cpu;
8349 struct per_cpu_pageset *pset;
Jiang Liu340175b2012-07-31 16:43:32 -07008350
8351 /* avoid races with drain_pages() */
8352 local_irq_save(flags);
8353 if (zone->pageset != &boot_pageset) {
Minchan Kim5a883812012-10-08 16:33:39 -07008354 for_each_online_cpu(cpu) {
8355 pset = per_cpu_ptr(zone->pageset, cpu);
8356 drain_zonestat(zone, pset);
8357 }
Jiang Liu340175b2012-07-31 16:43:32 -07008358 free_percpu(zone->pageset);
8359 zone->pageset = &boot_pageset;
8360 }
8361 local_irq_restore(flags);
8362}
8363
Wen Congyang6dcd73d2012-12-11 16:01:01 -08008364#ifdef CONFIG_MEMORY_HOTREMOVE
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008365/*
Joonsoo Kimb9eb6312016-05-19 17:12:06 -07008366 * All pages in the range must be in a single zone and isolated
8367 * before calling this.
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008368 */
8369void
8370__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
8371{
8372 struct page *page;
8373 struct zone *zone;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07008374 unsigned int order, i;
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008375 unsigned long pfn;
8376 unsigned long flags;
8377 /* find the first valid pfn */
8378 for (pfn = start_pfn; pfn < end_pfn; pfn++)
8379 if (pfn_valid(pfn))
8380 break;
8381 if (pfn == end_pfn)
8382 return;
Michal Hocko2d070ea2017-07-06 15:37:56 -07008383 offline_mem_sections(pfn, end_pfn);
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008384 zone = page_zone(pfn_to_page(pfn));
8385 spin_lock_irqsave(&zone->lock, flags);
8386 pfn = start_pfn;
8387 while (pfn < end_pfn) {
8388 if (!pfn_valid(pfn)) {
8389 pfn++;
8390 continue;
8391 }
8392 page = pfn_to_page(pfn);
Wen Congyangb023f462012-12-11 16:00:45 -08008393 /*
8394		 * The HWPoisoned page may not be in the buddy system, and
8395		 * its page_count() is not 0.
8396 */
8397 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
8398 pfn++;
8399 SetPageReserved(page);
8400 continue;
8401 }
8402
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008403 BUG_ON(page_count(page));
8404 BUG_ON(!PageBuddy(page));
8405 order = page_order(page);
8406#ifdef CONFIG_DEBUG_VM
Joe Perches11705322016-03-17 14:19:50 -07008407 pr_info("remove from free list %lx %d %lx\n",
8408 pfn, 1 << order, end_pfn);
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008409#endif
8410 list_del(&page->lru);
8411 rmv_page_order(page);
8412 zone->free_area[order].nr_free--;
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07008413 for (i = 0; i < (1 << order); i++)
8414 SetPageReserved((page+i));
8415 pfn += (1 << order);
8416 }
8417 spin_unlock_irqrestore(&zone->lock, flags);
8418}
8419#endif
Wu Fengguang8d22ba12009-12-16 12:19:58 +01008420
Wu Fengguang8d22ba12009-12-16 12:19:58 +01008421bool is_free_buddy_page(struct page *page)
8422{
8423 struct zone *zone = page_zone(page);
8424 unsigned long pfn = page_to_pfn(page);
8425 unsigned long flags;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07008426 unsigned int order;
Wu Fengguang8d22ba12009-12-16 12:19:58 +01008427
8428 spin_lock_irqsave(&zone->lock, flags);
8429 for (order = 0; order < MAX_ORDER; order++) {
8430 struct page *page_head = page - (pfn & ((1 << order) - 1));
8431
8432 if (PageBuddy(page_head) && page_order(page_head) >= order)
8433 break;
8434 }
8435 spin_unlock_irqrestore(&zone->lock, flags);
8436
8437 return order < MAX_ORDER;
8438}
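/*
 * The buddy-head arithmetic above, with assumed numbers: for pfn 0x1235
 * at order 4, page_head is the page at pfn 0x1230 (the low 4 bits of the
 * pfn are the offset inside a potential order-4 buddy), so the loop asks
 * at each order whether that candidate head is a free buddy large enough
 * to contain the page.
 */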
Naoya Horiguchid4ae9912018-08-23 17:00:42 -07008439
8440#ifdef CONFIG_MEMORY_FAILURE
8441/*
8442 * Set the PG_hwpoison flag if a given page is confirmed to be a free page.  This
8443 * test is performed under the zone lock to prevent a race against page
8444 * allocation.
8445 */
8446bool set_hwpoison_free_buddy_page(struct page *page)
8447{
8448 struct zone *zone = page_zone(page);
8449 unsigned long pfn = page_to_pfn(page);
8450 unsigned long flags;
8451 unsigned int order;
8452 bool hwpoisoned = false;
8453
8454 spin_lock_irqsave(&zone->lock, flags);
8455 for (order = 0; order < MAX_ORDER; order++) {
8456 struct page *page_head = page - (pfn & ((1 << order) - 1));
8457
8458 if (PageBuddy(page_head) && page_order(page_head) >= order) {
8459 if (!TestSetPageHWPoison(page))
8460 hwpoisoned = true;
8461 break;
8462 }
8463 }
8464 spin_unlock_irqrestore(&zone->lock, flags);
8465
8466 return hwpoisoned;
8467}
8468#endif