/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/* work_structs for global per-cpu drains */
DEFINE_MUTEX(pcpu_drain_mutex);
DEFINE_PER_CPU(struct work_struct, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}
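/*
 * Editorial note (not part of the original source): page->index has no
 * other meaning for a page sitting on a per-cpu free list, so the two
 * helpers above simply reuse it to carry the cached migratetype that the
 * comment block preceding them describes.
 */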

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};
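/*
 * Editorial note (not part of the original source): a worked example of the
 * ratios above, using the 1G layout from the comment. With ratio 256 a
 * NORMAL allocation keeps roughly 784M/256 ~= 3M of ZONE_DMA in reserve;
 * with ratio 32 a HIGHMEM allocation keeps about 224M/32 = 7M of
 * ZONE_NORMAL in reserve.
 */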

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[] = {
	NULL,
	free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_scale_factor = 10;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
static bool mirrored_kernelcore;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
	unsigned long max_initialise;
	unsigned long reserved_lowmem;

	/*
	 * Initialise at least 2G of a node, but also take into account
	 * two large system hashes that can take up 1GB for 0.25TB/node.
	 */
	max_initialise = max(2UL << (30 - PAGE_SHIFT),
		(pgdat->node_spanned_pages >> 8));

	/*
	 * Compensate for all the memblock reservations (e.g. crash kernel)
	 * from the initial estimation to make sure we will initialize enough
	 * memory to boot.
	 */
309 reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
310 pgdat->node_start_pfn + max_initialise);
311 max_initialise += reserved_lowmem;
312
313 pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
Mel Gorman3a80a7f2015-06-30 14:57:02 -0700314 pgdat->first_deferred_pfn = ULONG_MAX;
315}
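/*
 * Editorial note (not part of the original source): with 4K pages,
 * 2UL << (30 - PAGE_SHIFT) is 2UL << 18 = 524288 pages, i.e. the 2G floor
 * mentioned in the comment above, while node_spanned_pages >> 8 initialises
 * roughly 1/256th of the node up front; the larger of the two is used.
 */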

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns false when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	/* Always populate low zones for address-constrained allocations */
	if (zone_end < pgdat_end_pfn(pgdat))
		return true;
	(*nr_initialised)++;
	if ((*nr_initialised > pgdat->static_init_size) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		pgdat->first_deferred_pfn = pfn;
		return false;
	}

	return true;
}
#else
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	return true;
}
#endif

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}
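/*
 * Editorial note (not part of the original source): every pageblock owns
 * NR_PAGEBLOCK_BITS (4) consecutive bits in this bitmap, so the index
 * computed above is just "pageblock number within the section (or zone)
 * times 4"; three of those bits hold the migratetype and the fourth holds
 * the compaction skip hint.
 */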

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	bitidx += end_bitidx;
	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
}

static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	bitidx += end_bitidx;
	mask <<= (BITS_PER_LONG - bitidx - 1);
	flags <<= (BITS_PER_LONG - bitidx - 1);

	word = READ_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}
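/*
 * Editorial note (not part of the original source): the loop above is a
 * standard lock-free read-modify-write; cmpxchg() only installs the new
 * word if the bitmap word still holds the value that was read, and on
 * failure the returned current value seeds the next attempt.
 */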

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	__dump_page(page, reason);
	bad_flags &= page->flags;
	if (bad_flags)
		pr_alert("bad because of flags: %#lx(%pGp)\n",
						bad_flags, &bad_flags);
	dump_page_owner(page);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits form a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset into the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->mapping = TAIL_MAPPING;
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}
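/*
 * Editorial note (not part of the original source): after
 * prep_compound_page() the head page carries PG_head, the order and the
 * destructor index, while each tail page stores a pointer to the head in
 * ->compound_head with bit 0 set, which is what PageTail() and
 * compound_head() decode (see the "compound pages" comment above).
 */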

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled);
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
	if (!buf)
		return -EINVAL;
	return kstrtobool(buf, &_debug_pagealloc_enabled);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
	/* If we don't use debug_pagealloc, we don't need guard page */
	if (!debug_pagealloc_enabled())
		return false;

	if (!debug_guardpage_minorder())
		return false;

	return true;
}

static void init_debug_guardpage(void)
{
	if (!debug_pagealloc_enabled())
		return;

	if (!debug_guardpage_minorder())
		return;

	_debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
	.need = need_debug_guardpage,
	.init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return false;

	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;

	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops;
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * to PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (page_is_guard(buddy) && page_order(buddy) == order) {
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. The page's order is recorded in the
 * page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

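/*
 * Editorial note (not part of the original source): a sketch of the buddy
 * arithmetic used below, assuming the usual "flip the order bit"
 * definition of __find_buddy_pfn():
 *
 *	buddy_pfn    = pfn ^ (1 << order)
 *	combined_pfn = buddy_pfn & pfn	(start of the merged block)
 *
 * For example, freeing pfn 8 at order 1 gives buddy_pfn 10; if that buddy
 * is also free, the pair merges into an order-2 block starting at pfn 8.
 */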
static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long combined_pfn;
	unsigned long uninitialized_var(buddy_pfn);
	struct page *buddy;
	unsigned int max_order;

	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

continue_merging:
	while (order < max_order - 1) {
		buddy_pfn = __find_buddy_pfn(pfn, order);
		buddy = page + (buddy_pfn - pfn);

		if (!pfn_valid_within(buddy_pfn))
			goto done_merging;
		if (!page_is_buddy(page, buddy, order))
			goto done_merging;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard(zone, buddy, order, migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}
	if (max_order < MAX_ORDER) {
		/* If we are here, it means order is >= pageblock_order.
		 * We want to prevent merge between freepages on isolate
		 * pageblock and normal pageblock. Without this, pageblock
		 * isolation could cause incorrect freepage or CMA accounting.
		 *
		 * We don't want to hit this code for the more frequent
		 * low-order merging.
		 */
		if (unlikely(has_isolate_pageblock(zone))) {
			int buddy_mt;

			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);
			buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (is_migrate_isolate(migratetype) ||
						is_migrate_isolate(buddy_mt)))
				goto done_merging;
		}
		max_order++;
		goto continue_merging;
	}

done_merging:
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page.
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
		struct page *higher_page, *higher_buddy;
		combined_pfn = buddy_pfn & pfn;
		higher_page = page + (combined_pfn - pfn);
		buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
		higher_buddy = higher_page + (buddy_pfn - combined_pfn);
		if (pfn_valid_within(buddy_pfn) &&
		    page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			(unsigned long)page->mem_cgroup |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static void free_pages_check_bad(struct page *page)
{
	const char *bad_reason;
	unsigned long bad_flags;

	bad_reason = NULL;
	bad_flags = 0;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	bad_page(page, bad_reason, bad_flags);
}

static inline int free_pages_check(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return 0;

	/* Something has gone sideways, find it */
	free_pages_check_bad(page);
	return 1;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: ->mapping is compound_mapcount() */
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount", 0);
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * page_deferred_list().next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page", 0);
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set", 0);
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent", 0);
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

static __always_inline bool free_pages_prepare(struct page *page,
					unsigned int order, bool check_free)
{
	int bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound)
			ClearPageDoubleMap(page);
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_pages_check(page, page + i);
			if (unlikely(free_pages_check(page + i))) {
				bad++;
				continue;
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_enabled() && PageKmemcg(page))
		memcg_kmem_uncharge(page, order);
	if (check_free)
		bad += free_pages_check(page);
	if (bad)
		return false;

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_poison_pages(page, 1 << order, 0);
	kernel_map_pages(page, 1 << order, 0);
	kasan_free_pages(page, order);

	return true;
}
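/*
 * Editorial note (not part of the original source): the ordering above is
 * deliberate: all sanity checks run first, while the page contents and
 * tracking state are still intact, and only afterwards is the page
 * poisoned, unmapped from the kernel mapping (debug builds) and released
 * to KASAN.
 */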
Mel Gorman4db75482016-05-19 17:14:32 -07001064
1065#ifdef CONFIG_DEBUG_VM
1066static inline bool free_pcp_prepare(struct page *page)
1067{
Mel Gormane2769db2016-05-19 17:14:38 -07001068 return free_pages_prepare(page, 0, true);
Mel Gorman4db75482016-05-19 17:14:32 -07001069}
1070
1071static inline bool bulkfree_pcp_prepare(struct page *page)
1072{
1073 return false;
1074}
1075#else
1076static bool free_pcp_prepare(struct page *page)
1077{
Mel Gormane2769db2016-05-19 17:14:38 -07001078 return free_pages_prepare(page, 0, false);
Mel Gorman4db75482016-05-19 17:14:32 -07001079}
1080
1081static bool bulkfree_pcp_prepare(struct page *page)
1082{
1083 return free_pages_check(page);
1084}
1085#endif /* CONFIG_DEBUG_VM */
1086
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087/*
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001088 * Frees a number of pages from the PCP lists
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089 * Assumes all pages on list are in same zone, and of same order.
Renaud Lienhart207f36e2005-09-10 00:26:59 -07001090 * count is the number of pages to free.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001091 *
1092 * If the zone was previously in an "all pages pinned" state then look to
1093 * see if this freeing clears that state.
1094 *
1095 * And clear the zone's pages_scanned counter, to hold off the "all pages are
1096 * pinned" detection logic.
1097 */
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001098static void free_pcppages_bulk(struct zone *zone, int count,
1099 struct per_cpu_pages *pcp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001100{
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001101 int migratetype = 0;
Mel Gormana6f9edd62009-09-21 17:03:20 -07001102 int batch_free = 0;
Mel Gorman37779992016-05-19 17:13:58 -07001103 bool isolated_pageblocks;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001104
Mel Gormand34b0732017-04-20 14:37:43 -07001105 spin_lock(&zone->lock);
Mel Gorman37779992016-05-19 17:13:58 -07001106 isolated_pageblocks = has_isolate_pageblock(zone);
Mel Gormanf2260e62009-06-16 15:32:13 -07001107
Mel Gormane5b31ac2016-05-19 17:14:24 -07001108 while (count) {
Nick Piggin48db57f2006-01-08 01:00:42 -08001109 struct page *page;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001110 struct list_head *list;
Nick Piggin48db57f2006-01-08 01:00:42 -08001111
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001112 /*
Mel Gormana6f9edd62009-09-21 17:03:20 -07001113 * Remove pages from lists in a round-robin fashion. A
1114 * batch_free count is maintained that is incremented when an
1115 * empty list is encountered. This is so more pages are freed
1116 * off fuller lists instead of spinning excessively around empty
1117 * lists
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001118 */
1119 do {
Mel Gormana6f9edd62009-09-21 17:03:20 -07001120 batch_free++;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07001121 if (++migratetype == MIGRATE_PCPTYPES)
1122 migratetype = 0;
1123 list = &pcp->lists[migratetype];
1124 } while (list_empty(list));
1125
Namhyung Kim1d168712011-03-22 16:32:45 -07001126 /* This is the only non-empty list. Free them all. */
1127 if (batch_free == MIGRATE_PCPTYPES)
Mel Gormane5b31ac2016-05-19 17:14:24 -07001128 batch_free = count;
Namhyung Kim1d168712011-03-22 16:32:45 -07001129
Mel Gormana6f9edd62009-09-21 17:03:20 -07001130 do {
Bartlomiej Zolnierkiewicz770c8aa2012-10-08 16:31:57 -07001131 int mt; /* migratetype of the to-be-freed page */
1132
Geliang Tanga16601c2016-01-14 15:20:30 -08001133 page = list_last_entry(list, struct page, lru);
Mel Gormana6f9edd62009-09-21 17:03:20 -07001134 /* must delete as __free_one_page list manipulates */
1135 list_del(&page->lru);
Vlastimil Babkaaa016d12015-09-08 15:01:22 -07001136
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07001137 mt = get_pcppage_migratetype(page);
Vlastimil Babkaaa016d12015-09-08 15:01:22 -07001138 /* MIGRATE_ISOLATE page should not go to pcplists */
1139 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1140 /* Pageblock could have been isolated meanwhile */
Mel Gorman37779992016-05-19 17:13:58 -07001141 if (unlikely(isolated_pageblocks))
Joonsoo Kim51bb1a42014-11-13 15:19:14 -08001142 mt = get_pageblock_migratetype(page);
Joonsoo Kim51bb1a42014-11-13 15:19:14 -08001143
Mel Gorman4db75482016-05-19 17:14:32 -07001144 if (bulkfree_pcp_prepare(page))
1145 continue;
1146
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001147 __free_one_page(page, page_to_pfn(page), zone, 0, mt);
Bartlomiej Zolnierkiewicz770c8aa2012-10-08 16:31:57 -07001148 trace_mm_page_pcpu_drain(page, 0, mt);
Mel Gormane5b31ac2016-05-19 17:14:24 -07001149 } while (--count && --batch_free && !list_empty(list));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150 }
Mel Gormand34b0732017-04-20 14:37:43 -07001151 spin_unlock(&zone->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152}
1153
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001154static void free_one_page(struct zone *zone,
1155 struct page *page, unsigned long pfn,
Mel Gorman7aeb09f2014-06-04 16:10:21 -07001156 unsigned int order,
Mel Gormaned0ae212009-06-16 15:32:07 -07001157 int migratetype)
Nick Piggin48db57f2006-01-08 01:00:42 -08001158{
Mel Gormand34b0732017-04-20 14:37:43 -07001159 spin_lock(&zone->lock);
Joonsoo Kimad53f922014-11-13 15:19:11 -08001160 if (unlikely(has_isolate_pageblock(zone) ||
1161 is_migrate_isolate(migratetype))) {
1162 migratetype = get_pfnblock_migratetype(page, pfn);
Joonsoo Kimad53f922014-11-13 15:19:11 -08001163 }
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001164 __free_one_page(page, pfn, zone, order, migratetype);
Mel Gormand34b0732017-04-20 14:37:43 -07001165 spin_unlock(&zone->lock);
Nick Piggin48db57f2006-01-08 01:00:42 -08001166}
1167
Robin Holt1e8ce832015-06-30 14:56:45 -07001168static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1169 unsigned long zone, int nid)
1170{
Pavel Tatashinf7f99102017-11-15 17:36:44 -08001171 mm_zero_struct_page(page);
Robin Holt1e8ce832015-06-30 14:56:45 -07001172 set_page_links(page, zone, nid, pfn);
Robin Holt1e8ce832015-06-30 14:56:45 -07001173 init_page_count(page);
1174 page_mapcount_reset(page);
1175 page_cpupid_reset_last(page);
Robin Holt1e8ce832015-06-30 14:56:45 -07001176
Robin Holt1e8ce832015-06-30 14:56:45 -07001177 INIT_LIST_HEAD(&page->lru);
1178#ifdef WANT_PAGE_VIRTUAL
1179 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1180 if (!is_highmem_idx(zone))
1181 set_page_address(page, __va(pfn << PAGE_SHIFT));
1182#endif
1183}
1184
1185static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
1186 int nid)
1187{
1188 return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
1189}
1190
Mel Gorman7e18adb2015-06-30 14:57:05 -07001191#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
Arnd Bergmann57148a62017-10-03 16:15:10 -07001192static void __meminit init_reserved_page(unsigned long pfn)
Mel Gorman7e18adb2015-06-30 14:57:05 -07001193{
1194 pg_data_t *pgdat;
1195 int nid, zid;
1196
1197 if (!early_page_uninitialised(pfn))
1198 return;
1199
1200 nid = early_pfn_to_nid(pfn);
1201 pgdat = NODE_DATA(nid);
1202
1203 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1204 struct zone *zone = &pgdat->node_zones[zid];
1205
1206 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1207 break;
1208 }
1209 __init_single_pfn(pfn, zid, nid);
1210}
1211#else
1212static inline void init_reserved_page(unsigned long pfn)
1213{
1214}
1215#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1216
Nathan Zimmer92923ca2015-06-30 14:56:48 -07001217/*
1218 * Initialised pages do not have PageReserved set. This function is
1219 * called for each range allocated by the bootmem allocator and
1220 * marks the pages PageReserved. The remaining valid pages are later
1221 * sent to the buddy page allocator.
1222 */
Stefan Bader4b50bcc2016-05-20 16:58:38 -07001223void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
Nathan Zimmer92923ca2015-06-30 14:56:48 -07001224{
1225 unsigned long start_pfn = PFN_DOWN(start);
1226 unsigned long end_pfn = PFN_UP(end);
1227
Mel Gorman7e18adb2015-06-30 14:57:05 -07001228 for (; start_pfn < end_pfn; start_pfn++) {
1229 if (pfn_valid(start_pfn)) {
1230 struct page *page = pfn_to_page(start_pfn);
1231
1232 init_reserved_page(start_pfn);
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -08001233
1234 /* Avoid false-positive PageTail() */
1235 INIT_LIST_HEAD(&page->lru);
1236
Mel Gorman7e18adb2015-06-30 14:57:05 -07001237 SetPageReserved(page);
1238 }
1239 }
Nathan Zimmer92923ca2015-06-30 14:56:48 -07001240}
1241
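/*
 * Free pages directly to the buddy allocator, bypassing the per-cpu
 * lists: prepare the page, account the PGFREE event and hand it to
 * free_one_page() with interrupts disabled.
 */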
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001242static void __free_pages_ok(struct page *page, unsigned int order)
1243{
Mel Gormand34b0732017-04-20 14:37:43 -07001244 unsigned long flags;
Minchan Kim95e34412012-10-08 16:32:11 -07001245 int migratetype;
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001246 unsigned long pfn = page_to_pfn(page);
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001247
Mel Gormane2769db2016-05-19 17:14:38 -07001248 if (!free_pages_prepare(page, order, true))
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001249 return;
1250
Mel Gormancfc47a22014-06-04 16:10:19 -07001251 migratetype = get_pfnblock_migratetype(page, pfn);
Mel Gormand34b0732017-04-20 14:37:43 -07001252 local_irq_save(flags);
1253 __count_vm_events(PGFREE, 1 << order);
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001254 free_one_page(page_zone(page), page, pfn, order, migratetype);
Mel Gormand34b0732017-04-20 14:37:43 -07001255 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256}
1257
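/*
 * Release a boot-time chunk of 1 << order pages to the buddy allocator:
 * clear PageReserved and the page count on every constituent page, credit
 * them to managed_pages, then free the whole block at once. The
 * prefetchw() keeps the struct page writes one page ahead of the loop.
 */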
Li Zhang949698a2016-05-19 17:11:37 -07001258static void __init __free_pages_boot_core(struct page *page, unsigned int order)
David Howellsa226f6c2006-01-06 00:11:08 -08001259{
Johannes Weinerc3993072012-01-10 15:08:10 -08001260 unsigned int nr_pages = 1 << order;
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001261 struct page *p = page;
Johannes Weinerc3993072012-01-10 15:08:10 -08001262 unsigned int loop;
David Howellsa226f6c2006-01-06 00:11:08 -08001263
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001264 prefetchw(p);
1265 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1266 prefetchw(p + 1);
Johannes Weinerc3993072012-01-10 15:08:10 -08001267 __ClearPageReserved(p);
1268 set_page_count(p, 0);
David Howellsa226f6c2006-01-06 00:11:08 -08001269 }
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001270 __ClearPageReserved(p);
1271 set_page_count(p, 0);
Johannes Weinerc3993072012-01-10 15:08:10 -08001272
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001273 page_zone(page)->managed_pages += nr_pages;
Johannes Weinerc3993072012-01-10 15:08:10 -08001274 set_page_refcounted(page);
1275 __free_pages(page, order);
David Howellsa226f6c2006-01-06 00:11:08 -08001276}
1277
Mel Gorman75a592a2015-06-30 14:56:59 -07001278#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1279 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
Mel Gorman7ace9912015-08-06 15:46:13 -07001280
Mel Gorman75a592a2015-06-30 14:56:59 -07001281static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1282
1283int __meminit early_pfn_to_nid(unsigned long pfn)
1284{
Mel Gorman7ace9912015-08-06 15:46:13 -07001285 static DEFINE_SPINLOCK(early_pfn_lock);
Mel Gorman75a592a2015-06-30 14:56:59 -07001286 int nid;
1287
Mel Gorman7ace9912015-08-06 15:46:13 -07001288 spin_lock(&early_pfn_lock);
Mel Gorman75a592a2015-06-30 14:56:59 -07001289 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
Mel Gorman7ace9912015-08-06 15:46:13 -07001290 if (nid < 0)
Mel Gormane4568d32016-07-14 12:07:20 -07001291 nid = first_online_node;
Mel Gorman7ace9912015-08-06 15:46:13 -07001292 spin_unlock(&early_pfn_lock);
1293
1294 return nid;
Mel Gorman75a592a2015-06-30 14:56:59 -07001295}
1296#endif
1297
1298#ifdef CONFIG_NODES_SPAN_OTHER_NODES
Matthias Kaehlcked73d3c9f2017-07-06 15:39:23 -07001299static inline bool __meminit __maybe_unused
1300meminit_pfn_in_nid(unsigned long pfn, int node,
1301 struct mminit_pfnnid_cache *state)
Mel Gorman75a592a2015-06-30 14:56:59 -07001302{
1303 int nid;
1304
1305 nid = __early_pfn_to_nid(pfn, state);
1306 if (nid >= 0 && nid != node)
1307 return false;
1308 return true;
1309}
1310
1311/* Only safe to use early in boot when initialisation is single-threaded */
1312static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1313{
1314 return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1315}
1316
1317#else
1318
1319static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1320{
1321 return true;
1322}
Matthias Kaehlcked73d3c9f2017-07-06 15:39:23 -07001323static inline bool __meminit __maybe_unused
1324meminit_pfn_in_nid(unsigned long pfn, int node,
1325 struct mminit_pfnnid_cache *state)
Mel Gorman75a592a2015-06-30 14:56:59 -07001326{
1327 return true;
1328}
1329#endif
1330
1331
Mel Gorman0e1cc952015-06-30 14:57:27 -07001332void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
Mel Gorman3a80a7f2015-06-30 14:57:02 -07001333 unsigned int order)
1334{
1335 if (early_page_uninitialised(pfn))
1336 return;
Li Zhang949698a2016-05-19 17:11:37 -07001337 return __free_pages_boot_core(page, order);
Mel Gorman3a80a7f2015-06-30 14:57:02 -07001338}
1339
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001340/*
1341 * Check that the whole (or subset of) a pageblock given by the interval of
1342 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1343 * with the migration or free compaction scanner. The scanners then need to
1344 * use only pfn_valid_within() check for arches that allow holes within
1345 * pageblocks.
1346 *
1347 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1348 *
1349 * It's possible on some configurations to have a setup like node0 node1 node0
1350 * i.e. it's possible that all pages within a zone's range of pages do not
1351 * belong to a single zone. We assume that a border between node0 and node1
1352 * can occur within a single pageblock, but not a node0 node1 node0
1353 * interleaving within a single pageblock. It is therefore sufficient to check
1354 * the first and last page of a pageblock and avoid checking each individual
1355 * page in a pageblock.
1356 */
1357struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1358 unsigned long end_pfn, struct zone *zone)
1359{
1360 struct page *start_page;
1361 struct page *end_page;
1362
1363 /* end_pfn is one past the range we are checking */
1364 end_pfn--;
1365
1366 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1367 return NULL;
1368
Michal Hocko2d070ea2017-07-06 15:37:56 -07001369 start_page = pfn_to_online_page(start_pfn);
1370 if (!start_page)
1371 return NULL;
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001372
1373 if (page_zone(start_page) != zone)
1374 return NULL;
1375
1376 end_page = pfn_to_page(end_pfn);
1377
1378 /* This gives a shorter code than deriving page_zone(end_page) */
1379 if (page_zone_id(start_page) != page_zone_id(end_page))
1380 return NULL;
1381
1382 return start_page;
1383}
1384
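/*
 * Walk the zone one pageblock at a time and validate each block with
 * __pageblock_pfn_to_page(). zone->contiguous is set only if every
 * pageblock passes, i.e. the zone has no holes.
 */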
1385void set_zone_contiguous(struct zone *zone)
1386{
1387 unsigned long block_start_pfn = zone->zone_start_pfn;
1388 unsigned long block_end_pfn;
1389
1390 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1391 for (; block_start_pfn < zone_end_pfn(zone);
1392 block_start_pfn = block_end_pfn,
1393 block_end_pfn += pageblock_nr_pages) {
1394
1395 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1396
1397 if (!__pageblock_pfn_to_page(block_start_pfn,
1398 block_end_pfn, zone))
1399 return;
1400 }
1401
1402 /* We confirm that there is no hole */
1403 zone->contiguous = true;
1404}
1405
1406void clear_zone_contiguous(struct zone *zone)
1407{
1408 zone->contiguous = false;
1409}
1410
Mel Gorman7e18adb2015-06-30 14:57:05 -07001411#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
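/*
 * Free nr_pages pages starting at pfn to the buddy allocator. A naturally
 * aligned, pageblock-sized range is marked MIGRATE_MOVABLE and freed as a
 * single high-order chunk; otherwise the pages are freed individually,
 * setting the migratetype at each pageblock boundary.
 */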
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001412static void __init deferred_free_range(unsigned long pfn,
1413 unsigned long nr_pages)
Mel Gormana4de83d2015-06-30 14:57:16 -07001414{
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001415 struct page *page;
1416 unsigned long i;
Mel Gormana4de83d2015-06-30 14:57:16 -07001417
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001418 if (!nr_pages)
Mel Gormana4de83d2015-06-30 14:57:16 -07001419 return;
1420
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001421 page = pfn_to_page(pfn);
1422
Mel Gormana4de83d2015-06-30 14:57:16 -07001423 /* Free a large naturally-aligned chunk if possible */
Xishi Qiue7801492016-10-07 16:58:09 -07001424 if (nr_pages == pageblock_nr_pages &&
1425 (pfn & (pageblock_nr_pages - 1)) == 0) {
Mel Gormanac5d2532015-06-30 14:57:20 -07001426 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
Xishi Qiue7801492016-10-07 16:58:09 -07001427 __free_pages_boot_core(page, pageblock_order);
Mel Gormana4de83d2015-06-30 14:57:16 -07001428 return;
1429 }
1430
Xishi Qiue7801492016-10-07 16:58:09 -07001431 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1432 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1433 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
Li Zhang949698a2016-05-19 17:11:37 -07001434 __free_pages_boot_core(page, 0);
Xishi Qiue7801492016-10-07 16:58:09 -07001435 }
Mel Gormana4de83d2015-06-30 14:57:16 -07001436}
1437
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001438/* Completion tracking for deferred_init_memmap() threads */
1439static atomic_t pgdat_init_n_undone __initdata;
1440static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1441
1442static inline void __init pgdat_init_report_one_done(void)
1443{
1444 if (atomic_dec_and_test(&pgdat_init_n_undone))
1445 complete(&pgdat_init_all_done_comp);
1446}
Mel Gorman0e1cc952015-06-30 14:57:27 -07001447
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001448/*
1449 * Helper for deferred_init_range(): free the given range, reset the counters, and
1450 * return the number of pages freed.
1451 */
1452static inline unsigned long __init __def_free(unsigned long *nr_free,
1453 unsigned long *free_base_pfn,
1454 struct page **page)
1455{
1456 unsigned long nr = *nr_free;
1457
1458 deferred_free_range(*free_base_pfn, nr);
1459 *free_base_pfn = 0;
1460 *nr_free = 0;
1461 *page = NULL;
1462
1463 return nr;
1464}
1465
1466static unsigned long __init deferred_init_range(int nid, int zid,
1467 unsigned long start_pfn,
1468 unsigned long end_pfn)
1469{
1470 struct mminit_pfnnid_cache nid_init_state = { };
1471 unsigned long nr_pgmask = pageblock_nr_pages - 1;
1472 unsigned long free_base_pfn = 0;
1473 unsigned long nr_pages = 0;
1474 unsigned long nr_free = 0;
1475 struct page *page = NULL;
1476 unsigned long pfn;
1477
1478 /*
1479 * First we check if pfn is valid on architectures where it is possible
1480 * to have holes within pageblock_nr_pages. On systems where it is not
1481 * possible, this function is optimized out.
1482 *
1483 * Then, we check if a current large page is valid by only checking the
1484 * validity of the head pfn.
1485 *
1486 * meminit_pfn_in_nid is checked on systems where pfns can interleave
1487 * within a node: a pfn is between start and end of a node, but does not
1488 * belong to this memory node.
1489 *
1490 * Finally, we minimize pfn page lookups and scheduler checks by
1491	 * performing them only once every pageblock_nr_pages.
1492	 *
1493	 * We do it in two loops: first we initialize struct page, then free to
1494	 * buddy allocator, because while we are freeing pages we can access
1495 * pages that are ahead (computing buddy page in __free_one_page()).
1496 */
1497 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1498 if (!pfn_valid_within(pfn))
1499 continue;
1500 if ((pfn & nr_pgmask) || pfn_valid(pfn)) {
1501 if (meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1502 if (page && (pfn & nr_pgmask))
1503 page++;
1504 else
1505 page = pfn_to_page(pfn);
1506 __init_single_page(page, pfn, zid, nid);
1507 cond_resched();
1508 }
1509 }
1510 }
1511
1512 page = NULL;
1513 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1514 if (!pfn_valid_within(pfn)) {
1515 nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
1516 } else if (!(pfn & nr_pgmask) && !pfn_valid(pfn)) {
1517 nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
1518 } else if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1519 nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
1520 } else if (page && (pfn & nr_pgmask)) {
1521 page++;
1522 nr_free++;
1523 } else {
1524 nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
1525 page = pfn_to_page(pfn);
1526 free_base_pfn = pfn;
1527 nr_free = 1;
1528 cond_resched();
1529 }
1530 }
1531 /* Free the last block of pages to allocator */
1532 nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
1533
1534 return nr_pages;
1535}
1536
Mel Gorman7e18adb2015-06-30 14:57:05 -07001537/* Initialise remaining memory on a node */
Mel Gorman0e1cc952015-06-30 14:57:27 -07001538static int __init deferred_init_memmap(void *data)
Mel Gorman7e18adb2015-06-30 14:57:05 -07001539{
Mel Gorman0e1cc952015-06-30 14:57:27 -07001540 pg_data_t *pgdat = data;
1541 int nid = pgdat->node_id;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001542 unsigned long start = jiffies;
1543 unsigned long nr_pages = 0;
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001544 unsigned long spfn, epfn;
1545 phys_addr_t spa, epa;
1546 int zid;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001547 struct zone *zone;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001548 unsigned long first_init_pfn = pgdat->first_deferred_pfn;
Mel Gorman0e1cc952015-06-30 14:57:27 -07001549 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001550 u64 i;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001551
Mel Gorman0e1cc952015-06-30 14:57:27 -07001552 if (first_init_pfn == ULONG_MAX) {
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001553 pgdat_init_report_one_done();
Mel Gorman0e1cc952015-06-30 14:57:27 -07001554 return 0;
1555 }
1556
1557 /* Bind memory initialisation thread to a local node if possible */
1558 if (!cpumask_empty(cpumask))
1559 set_cpus_allowed_ptr(current, cpumask);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001560
1561 /* Sanity check boundaries */
1562 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1563 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1564 pgdat->first_deferred_pfn = ULONG_MAX;
1565
1566 /* Only the highest zone is deferred so find it */
1567 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1568 zone = pgdat->node_zones + zid;
1569 if (first_init_pfn < zone_end_pfn(zone))
1570 break;
1571 }
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001572 first_init_pfn = max(zone->zone_start_pfn, first_init_pfn);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001573
Pavel Tatashin2f47a912017-11-15 17:36:09 -08001574 for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
1575 spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
1576 epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
1577 nr_pages += deferred_init_range(nid, zid, spfn, epfn);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001578 }
1579
1580 /* Sanity check that the next zone really is unpopulated */
1581 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1582
Mel Gorman0e1cc952015-06-30 14:57:27 -07001583 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
Mel Gorman7e18adb2015-06-30 14:57:05 -07001584 jiffies_to_msecs(jiffies - start));
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001585
1586 pgdat_init_report_one_done();
Mel Gorman0e1cc952015-06-30 14:57:27 -07001587 return 0;
1588}
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001589#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
Mel Gorman0e1cc952015-06-30 14:57:27 -07001590
1591void __init page_alloc_init_late(void)
1592{
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001593 struct zone *zone;
1594
1595#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
Mel Gorman0e1cc952015-06-30 14:57:27 -07001596 int nid;
1597
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001598 /* There will be num_node_state(N_MEMORY) threads */
1599 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
Mel Gorman0e1cc952015-06-30 14:57:27 -07001600 for_each_node_state(nid, N_MEMORY) {
Mel Gorman0e1cc952015-06-30 14:57:27 -07001601 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1602 }
1603
1604 /* Block until all are initialised */
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001605 wait_for_completion(&pgdat_init_all_done_comp);
Mel Gorman4248b0d2015-08-06 15:46:20 -07001606
1607 /* Reinit limits that are based on free pages after the kernel is up */
1608 files_maxfiles_init();
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001609#endif
Pavel Tatashin3010f872017-08-18 15:16:05 -07001610#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
1611 /* Discard memblock private memory */
1612 memblock_discard();
1613#endif
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001614
1615 for_each_populated_zone(zone)
1616 set_zone_contiguous(zone);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001617}
Mel Gorman7e18adb2015-06-30 14:57:05 -07001618
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001619#ifdef CONFIG_CMA
Li Zhong9cf510a2013-08-23 13:52:52 +08001620/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001621void __init init_cma_reserved_pageblock(struct page *page)
1622{
1623 unsigned i = pageblock_nr_pages;
1624 struct page *p = page;
1625
1626 do {
1627 __ClearPageReserved(p);
1628 set_page_count(p, 0);
1629 } while (++p, --i);
1630
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001631 set_pageblock_migratetype(page, MIGRATE_CMA);
Michal Nazarewiczdc783272014-07-02 15:22:35 -07001632
1633 if (pageblock_order >= MAX_ORDER) {
1634 i = pageblock_nr_pages;
1635 p = page;
1636 do {
1637 set_page_refcounted(p);
1638 __free_pages(p, MAX_ORDER - 1);
1639 p += MAX_ORDER_NR_PAGES;
1640 } while (i -= MAX_ORDER_NR_PAGES);
1641 } else {
1642 set_page_refcounted(page);
1643 __free_pages(page, pageblock_order);
1644 }
1645
Jiang Liu3dcc0572013-07-03 15:03:21 -07001646 adjust_managed_page_count(page, pageblock_nr_pages);
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001647}
1648#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649
1650/*
1651 * The order of subdivision here is critical for the IO subsystem.
1652 * Please do not alter this order without good reasons and regression
1653 * testing. Specifically, as large blocks of memory are subdivided,
1654 * the order in which smaller blocks are delivered depends on the order
1655 * they're subdivided in this function. This is the primary factor
1656 * influencing the order in which pages are delivered to the IO
1657 * subsystem according to empirical testing, and this is also justified
1658 * by considering the behavior of a buddy system containing a single
1659 * large block of memory acted on by a series of small allocations.
1660 * This behavior is a critical factor in sglist merging's success.
1661 *
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +01001662 * -- nyc
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663 */
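/*
 * Illustration of the split done by expand() below: an order-0 request
 * satisfied from an order-2 block puts page[2] (an order-1 buddy covering
 * page[2]-page[3]) back on the order-1 free list and page[1] on the
 * order-0 free list, each tagged with set_page_order() so it can merge
 * later, while page[0] is returned to the caller.
 */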
Nick Piggin085cc7d52006-01-06 00:11:01 -08001664static inline void expand(struct zone *zone, struct page *page,
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001665 int low, int high, struct free_area *area,
1666 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667{
1668 unsigned long size = 1 << high;
1669
1670 while (high > low) {
1671 area--;
1672 high--;
1673 size >>= 1;
Sasha Levin309381fea2014-01-23 15:52:54 -08001674 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08001675
Joonsoo Kimacbc15a2016-10-07 16:58:15 -07001676 /*
1677		 * Mark as guard pages (or page); this allows them to be
1678		 * merged back into the allocator when the buddy is freed.
1679		 * The corresponding page table entries will not be touched,
1680		 * so the pages stay not present in the virtual address space.
1681 */
1682 if (set_page_guard(zone, &page[size], high, migratetype))
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08001683 continue;
Joonsoo Kimacbc15a2016-10-07 16:58:15 -07001684
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001685 list_add(&page[size].lru, &area->free_list[migratetype]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 area->nr_free++;
1687 set_page_order(&page[size], high);
1688 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689}
1690
Vlastimil Babka4e611802016-05-19 17:14:41 -07001691static void check_new_page_bad(struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692{
Vlastimil Babka4e611802016-05-19 17:14:41 -07001693 const char *bad_reason = NULL;
1694 unsigned long bad_flags = 0;
Dave Hansenf0b791a2014-01-23 15:52:49 -08001695
Kirill A. Shutemov53f92632016-01-15 16:53:42 -08001696 if (unlikely(atomic_read(&page->_mapcount) != -1))
Dave Hansenf0b791a2014-01-23 15:52:49 -08001697 bad_reason = "nonzero mapcount";
1698 if (unlikely(page->mapping != NULL))
1699 bad_reason = "non-NULL mapping";
Joonsoo Kimfe896d12016-03-17 14:19:26 -07001700 if (unlikely(page_ref_count(page) != 0))
Dave Hansenf0b791a2014-01-23 15:52:49 -08001701 bad_reason = "nonzero _count";
Naoya Horiguchif4c18e62015-08-06 15:47:08 -07001702 if (unlikely(page->flags & __PG_HWPOISON)) {
1703 bad_reason = "HWPoisoned (hardware-corrupted)";
1704 bad_flags = __PG_HWPOISON;
Naoya Horiguchie570f562016-05-20 16:58:50 -07001705 /* Don't complain about hwpoisoned pages */
1706 page_mapcount_reset(page); /* remove PageBuddy */
1707 return;
Naoya Horiguchif4c18e62015-08-06 15:47:08 -07001708 }
Dave Hansenf0b791a2014-01-23 15:52:49 -08001709 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1710 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1711 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1712 }
Johannes Weiner9edad6e2014-12-10 15:44:58 -08001713#ifdef CONFIG_MEMCG
1714 if (unlikely(page->mem_cgroup))
1715 bad_reason = "page still charged to cgroup";
1716#endif
Vlastimil Babka4e611802016-05-19 17:14:41 -07001717 bad_page(page, bad_reason, bad_flags);
1718}
1719
1720/*
1721 * This page is about to be returned from the page allocator
1722 */
1723static inline int check_new_page(struct page *page)
1724{
1725 if (likely(page_expected_state(page,
1726 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1727 return 0;
1728
1729 check_new_page_bad(page);
1730 return 1;
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001731}
1732
Vinayak Menonbd33ef32017-05-03 14:54:42 -07001733static inline bool free_pages_prezeroed(void)
Laura Abbott1414c7f2016-03-15 14:56:30 -07001734{
1735 return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
Vinayak Menonbd33ef32017-05-03 14:54:42 -07001736 page_poisoning_enabled();
Laura Abbott1414c7f2016-03-15 14:56:30 -07001737}
1738
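/*
 * Order-0 pages going through the per-cpu lists are checked exactly once
 * on one of two paths: with CONFIG_DEBUG_VM the check is done when a page
 * is taken from a pcp list (check_new_pcp), otherwise it is done when the
 * pcp list is refilled from the buddy lists (check_pcp_refill, see
 * rmqueue_bulk()).
 */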
Mel Gorman479f8542016-05-19 17:14:35 -07001739#ifdef CONFIG_DEBUG_VM
1740static bool check_pcp_refill(struct page *page)
1741{
1742 return false;
1743}
1744
1745static bool check_new_pcp(struct page *page)
1746{
1747 return check_new_page(page);
1748}
1749#else
1750static bool check_pcp_refill(struct page *page)
1751{
1752 return check_new_page(page);
1753}
1754static bool check_new_pcp(struct page *page)
1755{
1756 return false;
1757}
1758#endif /* CONFIG_DEBUG_VM */
1759
1760static bool check_new_pages(struct page *page, unsigned int order)
1761{
1762 int i;
1763 for (i = 0; i < (1 << order); i++) {
1764 struct page *p = page + i;
1765
1766 if (unlikely(check_new_page(p)))
1767 return true;
1768 }
1769
1770 return false;
1771}
1772
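/*
 * Common preparation of a page leaving the allocator: reset the private
 * field, set the refcount and run the arch, kernel mapping, poisoning,
 * KASAN and page_owner hooks.
 */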
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07001773inline void post_alloc_hook(struct page *page, unsigned int order,
1774 gfp_t gfp_flags)
1775{
1776 set_page_private(page, 0);
1777 set_page_refcounted(page);
1778
1779 arch_alloc_page(page, order);
1780 kernel_map_pages(page, 1 << order, 1);
1781 kernel_poison_pages(page, 1 << order, 1);
1782 kasan_alloc_pages(page, order);
1783 set_page_owner(page, order, gfp_flags);
1784}
1785
Mel Gorman479f8542016-05-19 17:14:35 -07001786static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
Mel Gormanc6038442016-05-19 17:13:38 -07001787 unsigned int alloc_flags)
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001788{
1789 int i;
Hugh Dickins689bceb2005-11-21 21:32:20 -08001790
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07001791 post_alloc_hook(page, order, gfp_flags);
Nick Piggin17cf4402006-03-22 00:08:41 -08001792
Vinayak Menonbd33ef32017-05-03 14:54:42 -07001793 if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
Anisse Astierf4d28972015-06-24 16:56:36 -07001794 for (i = 0; i < (1 << order); i++)
1795 clear_highpage(page + i);
Nick Piggin17cf4402006-03-22 00:08:41 -08001796
1797 if (order && (gfp_flags & __GFP_COMP))
1798 prep_compound_page(page, order);
1799
Vlastimil Babka75379192015-02-11 15:25:38 -08001800 /*
Michal Hocko2f064f32015-08-21 14:11:51 -07001801 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
Vlastimil Babka75379192015-02-11 15:25:38 -08001802 * allocate the page. The expectation is that the caller is taking
1803 * steps that will free more memory. The caller should avoid the page
1804 * being used for !PFMEMALLOC purposes.
1805 */
Michal Hocko2f064f32015-08-21 14:11:51 -07001806 if (alloc_flags & ALLOC_NO_WATERMARKS)
1807 set_page_pfmemalloc(page);
1808 else
1809 clear_page_pfmemalloc(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810}
1811
Mel Gorman56fd56b2007-10-16 01:25:58 -07001812/*
1813 * Go through the free lists for the given migratetype and remove
1814 * the smallest available page from the freelists
1815 */
Aaron Lu85ccc8f2017-11-15 17:36:53 -08001816static __always_inline
Mel Gorman728ec982009-06-16 15:32:04 -07001817struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
Mel Gorman56fd56b2007-10-16 01:25:58 -07001818 int migratetype)
1819{
1820 unsigned int current_order;
Pintu Kumarb8af2942013-09-11 14:20:34 -07001821 struct free_area *area;
Mel Gorman56fd56b2007-10-16 01:25:58 -07001822 struct page *page;
1823
1824 /* Find a page of the appropriate size in the preferred list */
1825 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1826 area = &(zone->free_area[current_order]);
Geliang Tanga16601c2016-01-14 15:20:30 -08001827 page = list_first_entry_or_null(&area->free_list[migratetype],
Mel Gorman56fd56b2007-10-16 01:25:58 -07001828 struct page, lru);
Geliang Tanga16601c2016-01-14 15:20:30 -08001829 if (!page)
1830 continue;
Mel Gorman56fd56b2007-10-16 01:25:58 -07001831 list_del(&page->lru);
1832 rmv_page_order(page);
1833 area->nr_free--;
Mel Gorman56fd56b2007-10-16 01:25:58 -07001834 expand(zone, page, order, current_order, area, migratetype);
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07001835 set_pcppage_migratetype(page, migratetype);
Mel Gorman56fd56b2007-10-16 01:25:58 -07001836 return page;
1837 }
1838
1839 return NULL;
1840}
1841
1842
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001843/*
1844 * This array describes the order in which free lists are fallen back on when
1845 * the free lists for the desired migratetype are depleted
1846 */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001847static int fallbacks[MIGRATE_TYPES][4] = {
Mel Gorman974a7862015-11-06 16:28:34 -08001848 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1849 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1850 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
Joonsoo Kimdc676472015-04-14 15:45:15 -07001851#ifdef CONFIG_CMA
Mel Gorman974a7862015-11-06 16:28:34 -08001852 [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001853#endif
Minchan Kim194159f2013-02-22 16:33:58 -08001854#ifdef CONFIG_MEMORY_ISOLATION
Mel Gorman974a7862015-11-06 16:28:34 -08001855 [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */
Minchan Kim194159f2013-02-22 16:33:58 -08001856#endif
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001857};
1858
Joonsoo Kimdc676472015-04-14 15:45:15 -07001859#ifdef CONFIG_CMA
Aaron Lu85ccc8f2017-11-15 17:36:53 -08001860static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
Joonsoo Kimdc676472015-04-14 15:45:15 -07001861 unsigned int order)
1862{
1863 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1864}
1865#else
1866static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1867 unsigned int order) { return NULL; }
1868#endif
1869
Mel Gormanc361be52007-10-16 01:25:51 -07001870/*
1871 * Move the free pages in a range to the free lists of the requested type.
Mel Gormand9c23402007-10-16 01:26:01 -07001872 * Note that start_page and end_page are not aligned on a pageblock
Mel Gormanc361be52007-10-16 01:25:51 -07001873 * boundary. If alignment is required, use move_freepages_block()
1874 */
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07001875static int move_freepages(struct zone *zone,
Adrian Bunkb69a7282008-07-23 21:28:12 -07001876 struct page *start_page, struct page *end_page,
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07001877 int migratetype, int *num_movable)
Mel Gormanc361be52007-10-16 01:25:51 -07001878{
1879 struct page *page;
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08001880 unsigned int order;
Mel Gormand1003132007-10-16 01:26:00 -07001881 int pages_moved = 0;
Mel Gormanc361be52007-10-16 01:25:51 -07001882
1883#ifndef CONFIG_HOLES_IN_ZONE
1884 /*
1885 * page_zone is not safe to call in this context when
1886 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1887 * anyway as we check zone boundaries in move_freepages_block().
1888 * Remove at a later date when no bug reports exist related to
Mel Gormanac0e5b72007-10-16 01:25:58 -07001889 * grouping pages by mobility
Mel Gormanc361be52007-10-16 01:25:51 -07001890 */
Mel Gorman97ee4ba2014-10-09 15:28:28 -07001891 VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
Mel Gormanc361be52007-10-16 01:25:51 -07001892#endif
1893
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07001894 if (num_movable)
1895 *num_movable = 0;
1896
Mel Gormanc361be52007-10-16 01:25:51 -07001897 for (page = start_page; page <= end_page;) {
1898 if (!pfn_valid_within(page_to_pfn(page))) {
1899 page++;
1900 continue;
1901 }
1902
Ard Biesheuvelf073bdc2017-01-10 16:58:00 -08001903 /* Make sure we are not inadvertently changing nodes */
1904 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1905
Mel Gormanc361be52007-10-16 01:25:51 -07001906 if (!PageBuddy(page)) {
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07001907 /*
1908 * We assume that pages that could be isolated for
1909 * migration are movable. But we don't actually try
1910 * isolating, as that would be expensive.
1911 */
1912 if (num_movable &&
1913 (PageLRU(page) || __PageMovable(page)))
1914 (*num_movable)++;
1915
Mel Gormanc361be52007-10-16 01:25:51 -07001916 page++;
1917 continue;
1918 }
1919
1920 order = page_order(page);
Kirill A. Shutemov84be48d2011-03-22 16:33:41 -07001921 list_move(&page->lru,
1922 &zone->free_area[order].free_list[migratetype]);
Mel Gormanc361be52007-10-16 01:25:51 -07001923 page += 1 << order;
Mel Gormand1003132007-10-16 01:26:00 -07001924 pages_moved += 1 << order;
Mel Gormanc361be52007-10-16 01:25:51 -07001925 }
1926
Mel Gormand1003132007-10-16 01:26:00 -07001927 return pages_moved;
Mel Gormanc361be52007-10-16 01:25:51 -07001928}
1929
Minchan Kimee6f5092012-07-31 16:43:50 -07001930int move_freepages_block(struct zone *zone, struct page *page,
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07001931 int migratetype, int *num_movable)
Mel Gormanc361be52007-10-16 01:25:51 -07001932{
1933 unsigned long start_pfn, end_pfn;
1934 struct page *start_page, *end_page;
1935
1936 start_pfn = page_to_pfn(page);
Mel Gormand9c23402007-10-16 01:26:01 -07001937 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
Mel Gormanc361be52007-10-16 01:25:51 -07001938 start_page = pfn_to_page(start_pfn);
Mel Gormand9c23402007-10-16 01:26:01 -07001939 end_page = start_page + pageblock_nr_pages - 1;
1940 end_pfn = start_pfn + pageblock_nr_pages - 1;
Mel Gormanc361be52007-10-16 01:25:51 -07001941
1942 /* Do not cross zone boundaries */
Cody P Schafer108bcc92013-02-22 16:35:23 -08001943 if (!zone_spans_pfn(zone, start_pfn))
Mel Gormanc361be52007-10-16 01:25:51 -07001944 start_page = page;
Cody P Schafer108bcc92013-02-22 16:35:23 -08001945 if (!zone_spans_pfn(zone, end_pfn))
Mel Gormanc361be52007-10-16 01:25:51 -07001946 return 0;
1947
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07001948 return move_freepages(zone, start_page, end_page, migratetype,
1949 num_movable);
Mel Gormanc361be52007-10-16 01:25:51 -07001950}
1951
Mel Gorman2f66a682009-09-21 17:02:31 -07001952static void change_pageblock_range(struct page *pageblock_page,
1953 int start_order, int migratetype)
1954{
1955 int nr_pageblocks = 1 << (start_order - pageblock_order);
1956
1957 while (nr_pageblocks--) {
1958 set_pageblock_migratetype(pageblock_page, migratetype);
1959 pageblock_page += pageblock_nr_pages;
1960 }
1961}
1962
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001963/*
Vlastimil Babka9c0415e2015-02-11 15:28:21 -08001964 * When we are falling back to another migratetype during allocation, try to
1965 * steal extra free pages from the same pageblocks to satisfy further
1966 * allocations, instead of polluting multiple pageblocks.
1967 *
1968 * If we are stealing a relatively large buddy page, it is likely there will
1969 * be more free pages in the pageblock, so try to steal them all. For
1970 * reclaimable and unmovable allocations, we steal regardless of page size,
1971 * as fragmentation caused by those allocations polluting movable pageblocks
1972 * is worse than movable allocations stealing from unmovable and reclaimable
1973 * pageblocks.
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001974 */
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001975static bool can_steal_fallback(unsigned int order, int start_mt)
1976{
1977 /*
1978	 * Leaving this order check here is intentional, even though the
1979	 * next check uses a more relaxed order threshold. The reason is that
1980	 * we can actually steal the whole pageblock if this condition is met,
1981	 * whereas the check below does not guarantee it and is just a heuristic,
1982	 * so it could be changed at any time.
1983 */
1984 if (order >= pageblock_order)
1985 return true;
1986
1987 if (order >= pageblock_order / 2 ||
1988 start_mt == MIGRATE_RECLAIMABLE ||
1989 start_mt == MIGRATE_UNMOVABLE ||
1990 page_group_by_mobility_disabled)
1991 return true;
1992
1993 return false;
1994}
1995
1996/*
1997 * This function implements actual steal behaviour. If order is large enough,
1998 * we can steal whole pageblock. If not, we first move freepages in this
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07001999 * pageblock to our migratetype and determine how many already-allocated pages
2000 * are there in the pageblock with a compatible migratetype. If at least half
2001 * of pages are free or compatible, we can change migratetype of the pageblock
2002 * itself, so pages freed in the future will be put on the correct free list.
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002003 */
2004static void steal_suitable_fallback(struct zone *zone, struct page *page,
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002005 int start_type, bool whole_block)
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002006{
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08002007 unsigned int current_order = page_order(page);
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002008 struct free_area *area;
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002009 int free_pages, movable_pages, alike_pages;
2010 int old_block_type;
2011
2012 old_block_type = get_pageblock_migratetype(page);
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002013
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002014 /*
2015 * This can happen due to races and we want to prevent broken
2016 * highatomic accounting.
2017 */
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002018 if (is_migrate_highatomic(old_block_type))
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002019 goto single_page;
2020
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002021 /* Take ownership for orders >= pageblock_order */
2022 if (current_order >= pageblock_order) {
2023 change_pageblock_range(page, current_order, start_type);
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002024 goto single_page;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002025 }
2026
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002027 /* We are not allowed to try stealing from the whole block */
2028 if (!whole_block)
2029 goto single_page;
2030
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002031 free_pages = move_freepages_block(zone, page, start_type,
2032 &movable_pages);
2033 /*
2034 * Determine how many pages are compatible with our allocation.
2035 * For movable allocation, it's the number of movable pages which
2036 * we just obtained. For other types it's a bit more tricky.
2037 */
2038 if (start_type == MIGRATE_MOVABLE) {
2039 alike_pages = movable_pages;
2040 } else {
2041 /*
2042 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2043 * to MOVABLE pageblock, consider all non-movable pages as
2044 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2045 * vice versa, be conservative since we can't distinguish the
2046 * exact migratetype of non-movable pages.
2047 */
2048 if (old_block_type == MIGRATE_MOVABLE)
2049 alike_pages = pageblock_nr_pages
2050 - (free_pages + movable_pages);
2051 else
2052 alike_pages = 0;
2053 }
2054
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002055 /* moving whole block can fail due to zone boundary conditions */
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002056 if (!free_pages)
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002057 goto single_page;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002058
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002059 /*
2060 * If a sufficient number of pages in the block are either free or of
2061 * comparable migratability as our allocation, claim the whole block.
2062 */
2063 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002064 page_group_by_mobility_disabled)
2065 set_pageblock_migratetype(page, start_type);
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002066
2067 return;
2068
2069single_page:
2070 area = &zone->free_area[current_order];
2071 list_move(&page->lru, &area->free_list[start_type]);
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002072}
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002073
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002074/*
2075 * Check whether there is a suitable fallback freepage with requested order.
2076 * If only_stealable is true, this function returns fallback_mt only if
2077 * we can steal other freepages all together. This would help to reduce
2078 * fragmentation due to mixed migratetype pages in one pageblock.
2079 */
2080int find_suitable_fallback(struct free_area *area, unsigned int order,
2081 int migratetype, bool only_stealable, bool *can_steal)
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002082{
2083 int i;
2084 int fallback_mt;
2085
2086 if (area->nr_free == 0)
2087 return -1;
2088
2089 *can_steal = false;
2090 for (i = 0;; i++) {
2091 fallback_mt = fallbacks[migratetype][i];
Mel Gorman974a7862015-11-06 16:28:34 -08002092 if (fallback_mt == MIGRATE_TYPES)
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002093 break;
2094
2095 if (list_empty(&area->free_list[fallback_mt]))
2096 continue;
2097
2098 if (can_steal_fallback(order, migratetype))
2099 *can_steal = true;
2100
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002101 if (!only_stealable)
2102 return fallback_mt;
2103
2104 if (*can_steal)
2105 return fallback_mt;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002106 }
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002107
2108 return -1;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002109}
2110
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002111/*
2112 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2113 * there are no empty page blocks that contain a page with a suitable order
2114 */
2115static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2116 unsigned int alloc_order)
2117{
2118 int mt;
2119 unsigned long max_managed, flags;
2120
2121 /*
2122 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2123 * Check is race-prone but harmless.
2124 */
2125 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
2126 if (zone->nr_reserved_highatomic >= max_managed)
2127 return;
2128
2129 spin_lock_irqsave(&zone->lock, flags);
2130
2131 /* Recheck the nr_reserved_highatomic limit under the lock */
2132 if (zone->nr_reserved_highatomic >= max_managed)
2133 goto out_unlock;
2134
2135 /* Yoink! */
2136 mt = get_pageblock_migratetype(page);
Xishi Qiua6ffdc02017-05-03 14:52:52 -07002137 if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2138 && !is_migrate_cma(mt)) {
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002139 zone->nr_reserved_highatomic += pageblock_nr_pages;
2140 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002141 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002142 }
2143
2144out_unlock:
2145 spin_unlock_irqrestore(&zone->lock, flags);
2146}
2147
2148/*
2149 * Used when an allocation is about to fail under memory pressure. This
2150 * potentially hurts the reliability of high-order allocations when under
2151 * intense memory pressure but failed atomic allocations should be easier
2152 * to recover from than an OOM.
Minchan Kim29fac032016-12-12 16:42:14 -08002153 *
2154 * If @force is true, try to unreserve a pageblock even though highatomic
2155 * pageblock is exhausted.
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002156 */
Minchan Kim29fac032016-12-12 16:42:14 -08002157static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2158 bool force)
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002159{
2160 struct zonelist *zonelist = ac->zonelist;
2161 unsigned long flags;
2162 struct zoneref *z;
2163 struct zone *zone;
2164 struct page *page;
2165 int order;
Minchan Kim04c87162016-12-12 16:42:11 -08002166 bool ret;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002167
2168 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2169 ac->nodemask) {
Minchan Kim29fac032016-12-12 16:42:14 -08002170 /*
2171 * Preserve at least one pageblock unless memory pressure
2172 * is really high.
2173 */
2174 if (!force && zone->nr_reserved_highatomic <=
2175 pageblock_nr_pages)
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002176 continue;
2177
2178 spin_lock_irqsave(&zone->lock, flags);
2179 for (order = 0; order < MAX_ORDER; order++) {
2180 struct free_area *area = &(zone->free_area[order]);
2181
Geliang Tanga16601c2016-01-14 15:20:30 -08002182 page = list_first_entry_or_null(
2183 &area->free_list[MIGRATE_HIGHATOMIC],
2184 struct page, lru);
2185 if (!page)
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002186 continue;
2187
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002188 /*
Minchan Kim4855e4a2016-12-12 16:42:08 -08002189 * In page freeing path, migratetype change is racy so
2190			 * we can encounter several free pages in a pageblock
2191			 * in this loop although we changed the pageblock type
2192 * from highatomic to ac->migratetype. So we should
2193 * adjust the count once.
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002194 */
Xishi Qiua6ffdc02017-05-03 14:52:52 -07002195 if (is_migrate_highatomic_page(page)) {
Minchan Kim4855e4a2016-12-12 16:42:08 -08002196 /*
2197 * It should never happen but changes to
2198 * locking could inadvertently allow a per-cpu
2199 * drain to add pages to MIGRATE_HIGHATOMIC
2200 * while unreserving so be safe and watch for
2201 * underflows.
2202 */
2203 zone->nr_reserved_highatomic -= min(
2204 pageblock_nr_pages,
2205 zone->nr_reserved_highatomic);
2206 }
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002207
2208 /*
2209 * Convert to ac->migratetype and avoid the normal
2210 * pageblock stealing heuristics. Minimally, the caller
2211 * is doing the work and needs the pages. More
2212 * importantly, if the block was always converted to
2213 * MIGRATE_UNMOVABLE or another type then the number
2214 * of pageblocks that cannot be completely freed
2215 * may increase.
2216 */
2217 set_pageblock_migratetype(page, ac->migratetype);
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -07002218 ret = move_freepages_block(zone, page, ac->migratetype,
2219 NULL);
Minchan Kim29fac032016-12-12 16:42:14 -08002220 if (ret) {
2221 spin_unlock_irqrestore(&zone->lock, flags);
2222 return ret;
2223 }
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002224 }
2225 spin_unlock_irqrestore(&zone->lock, flags);
2226 }
Minchan Kim04c87162016-12-12 16:42:11 -08002227
2228 return false;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002229}
2230
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002231/*
2232 * Try finding a free buddy page on the fallback list and put it on the free
2233 * list of requested migratetype, possibly along with other pages from the same
2234 * block, depending on fragmentation avoidance heuristics. Returns true if
2235 * fallback was found so that __rmqueue_smallest() can grab it.
Rasmus Villemoesb0025292017-07-10 15:49:26 -07002236 *
2237 * The use of signed ints for order and current_order is a deliberate
2238 * deviation from the rest of this file, to make the for loop
2239 * condition simpler.
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002240 */
Aaron Lu85ccc8f2017-11-15 17:36:53 -08002241static __always_inline bool
Rasmus Villemoesb0025292017-07-10 15:49:26 -07002242__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002243{
Pintu Kumarb8af2942013-09-11 14:20:34 -07002244 struct free_area *area;
Rasmus Villemoesb0025292017-07-10 15:49:26 -07002245 int current_order;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002246 struct page *page;
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002247 int fallback_mt;
2248 bool can_steal;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002249
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002250 /*
2251 * Find the largest available free page in the other list. This roughly
2252 * approximates finding the pageblock with the most free pages, which
2253 * would be too costly to do exactly.
2254 */
Rasmus Villemoesb0025292017-07-10 15:49:26 -07002255 for (current_order = MAX_ORDER - 1; current_order >= order;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002256 --current_order) {
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002257 area = &(zone->free_area[current_order]);
2258 fallback_mt = find_suitable_fallback(area, current_order,
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002259 start_migratetype, false, &can_steal);
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002260 if (fallback_mt == -1)
2261 continue;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002262
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002263 /*
2264 * We cannot steal all free pages from the pageblock and the
2265 * requested migratetype is movable. In that case it's better to
2266 * steal and split the smallest available page instead of the
2267 * largest available page, because even if the next movable
2268 * allocation falls back into a different pageblock than this
2269 * one, it won't cause permanent fragmentation.
2270 */
2271 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2272 && current_order > order)
2273 goto find_smallest;
Mel Gormane0104872007-10-16 01:25:53 -07002274
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002275 goto do_steal;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002276 }
2277
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002278 return false;
Vlastimil Babka7a8f58f2017-07-10 15:47:14 -07002279
2280find_smallest:
2281 for (current_order = order; current_order < MAX_ORDER;
2282 current_order++) {
2283 area = &(zone->free_area[current_order]);
2284 fallback_mt = find_suitable_fallback(area, current_order,
2285 start_migratetype, false, &can_steal);
2286 if (fallback_mt != -1)
2287 break;
2288 }
2289
2290 /*
2291 * This should not happen - we already found a suitable fallback
2292 * when looking for the largest page.
2293 */
2294 VM_BUG_ON(current_order == MAX_ORDER);
2295
2296do_steal:
2297 page = list_first_entry(&area->free_list[fallback_mt],
2298 struct page, lru);
2299
2300 steal_suitable_fallback(zone, page, start_migratetype, can_steal);
2301
2302 trace_mm_page_alloc_extfrag(page, order, current_order,
2303 start_migratetype, fallback_mt);
2304
2305 return true;
2306
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002307}
2308
Mel Gorman56fd56b2007-10-16 01:25:58 -07002309/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310 * Do the hard work of removing an element from the buddy allocator.
2311 * Call me with the zone->lock already held.
2312 */
Aaron Lu85ccc8f2017-11-15 17:36:53 -08002313static __always_inline struct page *
2314__rmqueue(struct zone *zone, unsigned int order, int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 struct page *page;
2317
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002318retry:
Mel Gorman56fd56b2007-10-16 01:25:58 -07002319 page = __rmqueue_smallest(zone, order, migratetype);
Mel Gorman974a7862015-11-06 16:28:34 -08002320 if (unlikely(!page)) {
Joonsoo Kimdc676472015-04-14 15:45:15 -07002321 if (migratetype == MIGRATE_MOVABLE)
2322 page = __rmqueue_cma_fallback(zone, order);
2323
Vlastimil Babka3bc48f92017-05-08 15:54:37 -07002324 if (!page && __rmqueue_fallback(zone, order, migratetype))
2325 goto retry;
Mel Gorman728ec982009-06-16 15:32:04 -07002326 }
2327
Mel Gorman0d3d0622009-09-21 17:02:44 -07002328 trace_mm_page_alloc_zone_locked(page, order, migratetype);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002329 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330}
2331
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01002332/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333 * Obtain a specified number of elements from the buddy allocator, all under
2334 * a single hold of the lock, for efficiency. Add them to the supplied list.
2335 * Returns the number of new pages which were placed at *list.
2336 */
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01002337static int rmqueue_bulk(struct zone *zone, unsigned int order,
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002338 unsigned long count, struct list_head *list,
Mel Gormanb745bc82014-06-04 16:10:22 -07002339 int migratetype, bool cold)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340{
Mel Gormana6de7342016-12-12 16:44:41 -08002341 int i, alloced = 0;
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01002342
Mel Gormand34b0732017-04-20 14:37:43 -07002343 spin_lock(&zone->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344 for (i = 0; i < count; ++i) {
Mel Gorman6ac02062016-01-14 15:20:28 -08002345 struct page *page = __rmqueue(zone, order, migratetype);
Nick Piggin085cc7d52006-01-06 00:11:01 -08002346 if (unlikely(page == NULL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347 break;
Mel Gorman81eabcb2007-12-17 16:20:05 -08002348
Mel Gorman479f8542016-05-19 17:14:35 -07002349 if (unlikely(check_pcp_refill(page)))
2350 continue;
2351
Mel Gorman81eabcb2007-12-17 16:20:05 -08002352 /*
2353 * Split buddy pages returned by expand() are received here
2354		 * in physical page order. The page is added to the caller's
2355		 * list and the list head then moves forward. From the caller's
2356		 * perspective, the linked list is ordered by page number in
2357 * some conditions. This is useful for IO devices that can
2358 * merge IO requests if the physical pages are ordered
2359 * properly.
2360 */
Mel Gormanb745bc82014-06-04 16:10:22 -07002361 if (likely(!cold))
Mel Gormane084b2d2009-07-29 15:02:04 -07002362 list_add(&page->lru, list);
2363 else
2364 list_add_tail(&page->lru, list);
Mel Gorman81eabcb2007-12-17 16:20:05 -08002365 list = &page->lru;
Mel Gormana6de7342016-12-12 16:44:41 -08002366 alloced++;
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002367 if (is_migrate_cma(get_pcppage_migratetype(page)))
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07002368 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2369 -(1 << order));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370 }
Mel Gormana6de7342016-12-12 16:44:41 -08002371
2372 /*
2373 * i pages were removed from the buddy list even if some leak due
2374 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
2375 * on i. Do not confuse with 'alloced' which is the number of
2376 * pages added to the pcp list.
2377 */
Mel Gormanf2260e62009-06-16 15:32:13 -07002378 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
Mel Gormand34b0732017-04-20 14:37:43 -07002379 spin_unlock(&zone->lock);
Mel Gormana6de7342016-12-12 16:44:41 -08002380 return alloced;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381}
2382
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002383#ifdef CONFIG_NUMA
Christoph Lameter8fce4d82006-03-09 17:33:54 -08002384/*
Christoph Lameter4037d452007-05-09 02:35:14 -07002385 * Called from the vmstat counter updater to drain pagesets of this
2386 * currently executing processor on remote nodes after they have
2387 * expired.
2388 *
Christoph Lameter879336c2006-03-22 00:09:08 -08002389 * Note that this function must be called with the thread pinned to
2390 * a single processor.
Christoph Lameter8fce4d82006-03-09 17:33:54 -08002391 */
Christoph Lameter4037d452007-05-09 02:35:14 -07002392void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002393{
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002394 unsigned long flags;
Michal Nazarewicz7be12fc2014-08-06 16:05:15 -07002395 int to_drain, batch;
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002396
Christoph Lameter4037d452007-05-09 02:35:14 -07002397 local_irq_save(flags);
Jason Low4db0c3c2015-04-15 16:14:08 -07002398 batch = READ_ONCE(pcp->batch);
Michal Nazarewicz7be12fc2014-08-06 16:05:15 -07002399 to_drain = min(pcp->count, batch);
KOSAKI Motohiro2a135152012-07-31 16:42:53 -07002400 if (to_drain > 0) {
2401 free_pcppages_bulk(zone, to_drain, pcp);
2402 pcp->count -= to_drain;
2403 }
Christoph Lameter4037d452007-05-09 02:35:14 -07002404 local_irq_restore(flags);
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002405}
2406#endif
2407
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002408/*
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002409 * Drain pcplists of the indicated processor and zone.
2410 *
2411 * The processor must either be the current processor and the
2412 * thread pinned to the current processor or a processor that
2413 * is not online.
2414 */
2415static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2416{
2417 unsigned long flags;
2418 struct per_cpu_pageset *pset;
2419 struct per_cpu_pages *pcp;
2420
2421 local_irq_save(flags);
2422 pset = per_cpu_ptr(zone->pageset, cpu);
2423
2424 pcp = &pset->pcp;
2425 if (pcp->count) {
2426 free_pcppages_bulk(zone, pcp->count, pcp);
2427 pcp->count = 0;
2428 }
2429 local_irq_restore(flags);
2430}
2431
2432/*
2433 * Drain pcplists of all zones on the indicated processor.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002434 *
2435 * The processor must either be the current processor and the
2436 * thread pinned to the current processor or a processor that
2437 * is not online.
2438 */
2439static void drain_pages(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440{
2441 struct zone *zone;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07002443 for_each_populated_zone(zone) {
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002444 drain_pages_zone(cpu, zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 }
2446}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002448/*
2449 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002450 *
2451 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
2452 * the single zone's pages.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002453 */
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002454void drain_local_pages(struct zone *zone)
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002455{
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002456 int cpu = smp_processor_id();
2457
2458 if (zone)
2459 drain_pages_zone(cpu, zone);
2460 else
2461 drain_pages(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002462}
2463
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002464static void drain_local_pages_wq(struct work_struct *work)
2465{
Michal Hockoa459eeb2017-02-24 14:56:35 -08002466 /*
2467 * drain_all_pages doesn't use proper cpu hotplug protection so
2468 * we can race with cpu offline when the WQ can move this from
2469 * a cpu pinned worker to an unbound one. We can operate on a different
2470 * cpu, which is all right, but we also have to make sure not to move to
2471 * a different one.
2472 */
2473 preempt_disable();
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002474 drain_local_pages(NULL);
Michal Hockoa459eeb2017-02-24 14:56:35 -08002475 preempt_enable();
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002476}
2477
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002478/*
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002479 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2480 *
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002481 * When zone parameter is non-NULL, spill just the single zone's pages.
2482 *
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002483 * Note that this can be extremely slow as the draining happens in a workqueue.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002484 */
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002485void drain_all_pages(struct zone *zone)
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002486{
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002487 int cpu;
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002488
2489 /*
2490 * Allocate in the BSS so we won't require allocation in
2491 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2492 */
2493 static cpumask_t cpus_with_pcps;
2494
Michal Hockoce612872017-04-07 16:05:05 -07002495 /*
2496 * Make sure nobody triggers this path before mm_percpu_wq is fully
2497 * initialized.
2498 */
2499 if (WARN_ON_ONCE(!mm_percpu_wq))
2500 return;
2501
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002502 /* Workqueues cannot recurse */
2503 if (current->flags & PF_WQ_WORKER)
2504 return;
2505
Mel Gormanbd233f52017-02-24 14:56:56 -08002506 /*
2507 * Do not drain if one is already in progress unless it's specific to
2508 * a zone. Such callers are primarily CMA and memory hotplug and need
2509 * the drain to be complete when the call returns.
2510 */
2511 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2512 if (!zone)
2513 return;
2514 mutex_lock(&pcpu_drain_mutex);
2515 }
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002516
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002517 /*
2518 * We don't care about racing with a CPU hotplug event
2519 * as the offline notification will cause the notified
2520 * cpu to drain that CPU's pcps, and on_each_cpu_mask
2521 * disables preemption as part of its processing.
2522 */
2523 for_each_online_cpu(cpu) {
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002524 struct per_cpu_pageset *pcp;
2525 struct zone *z;
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002526 bool has_pcps = false;
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002527
2528 if (zone) {
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002529 pcp = per_cpu_ptr(zone->pageset, cpu);
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002530 if (pcp->pcp.count)
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002531 has_pcps = true;
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002532 } else {
2533 for_each_populated_zone(z) {
2534 pcp = per_cpu_ptr(z->pageset, cpu);
2535 if (pcp->pcp.count) {
2536 has_pcps = true;
2537 break;
2538 }
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002539 }
2540 }
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002541
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002542 if (has_pcps)
2543 cpumask_set_cpu(cpu, &cpus_with_pcps);
2544 else
2545 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2546 }
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002547
Mel Gormanbd233f52017-02-24 14:56:56 -08002548 for_each_cpu(cpu, &cpus_with_pcps) {
2549 struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
2550 INIT_WORK(work, drain_local_pages_wq);
Michal Hockoce612872017-04-07 16:05:05 -07002551 queue_work_on(cpu, mm_percpu_wq, work);
Mel Gorman0ccce3b2017-02-24 14:56:32 -08002552 }
Mel Gormanbd233f52017-02-24 14:56:56 -08002553 for_each_cpu(cpu, &cpus_with_pcps)
2554 flush_work(per_cpu_ptr(&pcpu_drain, cpu));
2555
2556 mutex_unlock(&pcpu_drain_mutex);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002557}
2558
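/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * way callers use the drain interface above. Passing a zone restricts the
 * drain and, per the comment above, is what CMA and memory hotplug rely on
 * to get a complete drain; passing NULL drains every populated zone. The
 * wrapper name is hypothetical.
 */
static void example_flush_pcplists(struct zone *zone)
{
	/* NULL means "all zones"; a non-NULL zone limits the drain to it */
	drain_all_pages(zone);
}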
Rafael J. Wysocki296699d2007-07-29 23:27:18 +02002559#ifdef CONFIG_HIBERNATION
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560
Chen Yu556b9692017-08-25 15:55:30 -07002561/*
2562 * Touch the watchdog for every WD_PAGE_COUNT pages.
2563 */
2564#define WD_PAGE_COUNT (128*1024)
2565
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566void mark_free_pages(struct zone *zone)
2567{
Chen Yu556b9692017-08-25 15:55:30 -07002568 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002569 unsigned long flags;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002570 unsigned int order, t;
Geliang Tang86760a22016-01-14 15:20:33 -08002571 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572
Xishi Qiu8080fc02013-09-11 14:21:45 -07002573 if (zone_is_empty(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574 return;
2575
2576 spin_lock_irqsave(&zone->lock, flags);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002577
Cody P Schafer108bcc92013-02-22 16:35:23 -08002578 max_zone_pfn = zone_end_pfn(zone);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002579 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2580 if (pfn_valid(pfn)) {
Geliang Tang86760a22016-01-14 15:20:33 -08002581 page = pfn_to_page(pfn);
Joonsoo Kimba6b0972016-05-19 17:12:16 -07002582
Chen Yu556b9692017-08-25 15:55:30 -07002583 if (!--page_count) {
2584 touch_nmi_watchdog();
2585 page_count = WD_PAGE_COUNT;
2586 }
2587
Joonsoo Kimba6b0972016-05-19 17:12:16 -07002588 if (page_zone(page) != zone)
2589 continue;
2590
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002591 if (!swsusp_page_is_forbidden(page))
2592 swsusp_unset_page_free(page);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002593 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002595 for_each_migratetype_order(order, t) {
Geliang Tang86760a22016-01-14 15:20:33 -08002596 list_for_each_entry(page,
2597 &zone->free_area[order].free_list[t], lru) {
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002598 unsigned long i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599
Geliang Tang86760a22016-01-14 15:20:33 -08002600 pfn = page_to_pfn(page);
Chen Yu556b9692017-08-25 15:55:30 -07002601 for (i = 0; i < (1UL << order); i++) {
2602 if (!--page_count) {
2603 touch_nmi_watchdog();
2604 page_count = WD_PAGE_COUNT;
2605 }
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002606 swsusp_set_page_free(pfn_to_page(pfn + i));
Chen Yu556b9692017-08-25 15:55:30 -07002607 }
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002608 }
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002609 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 spin_unlock_irqrestore(&zone->lock, flags);
2611}
Mel Gormane2c55dc2007-10-16 01:25:50 -07002612#endif /* CONFIG_PM */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613
Mel Gorman2d4894b2017-11-15 17:37:59 -08002614static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615{
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002616 int migratetype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617
Mel Gorman4db75482016-05-19 17:14:32 -07002618 if (!free_pcp_prepare(page))
Mel Gorman9cca35d42017-11-15 17:37:37 -08002619 return false;
Hugh Dickins689bceb2005-11-21 21:32:20 -08002620
Mel Gormandc4b0ca2014-06-04 16:10:17 -07002621 migratetype = get_pfnblock_migratetype(page, pfn);
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002622 set_pcppage_migratetype(page, migratetype);
Mel Gorman9cca35d42017-11-15 17:37:37 -08002623 return true;
2624}
2625
Mel Gorman2d4894b2017-11-15 17:37:59 -08002626static void free_unref_page_commit(struct page *page, unsigned long pfn)
Mel Gorman9cca35d42017-11-15 17:37:37 -08002627{
2628 struct zone *zone = page_zone(page);
2629 struct per_cpu_pages *pcp;
2630 int migratetype;
2631
2632 migratetype = get_pcppage_migratetype(page);
Mel Gormand34b0732017-04-20 14:37:43 -07002633 __count_vm_event(PGFREE);
Mel Gormanda456f12009-06-16 15:32:08 -07002634
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002635 /*
2636 * We only track unmovable, reclaimable and movable on pcp lists.
2637 * Free ISOLATE pages back to the allocator because they are being
Xishi Qiua6ffdc02017-05-03 14:52:52 -07002638 * offlined, but treat HIGHATOMIC as movable pages so we can get those
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002639 * areas back if necessary. Otherwise, we may have to free
2640 * excessively into the page allocator.
2641 */
2642 if (migratetype >= MIGRATE_PCPTYPES) {
Minchan Kim194159f2013-02-22 16:33:58 -08002643 if (unlikely(is_migrate_isolate(migratetype))) {
Mel Gormandc4b0ca2014-06-04 16:10:17 -07002644 free_one_page(zone, page, pfn, 0, migratetype);
Mel Gorman9cca35d42017-11-15 17:37:37 -08002645 return;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002646 }
2647 migratetype = MIGRATE_MOVABLE;
2648 }
2649
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09002650 pcp = &this_cpu_ptr(zone->pageset)->pcp;
Mel Gorman2d4894b2017-11-15 17:37:59 -08002651 list_add(&page->lru, &pcp->lists[migratetype]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652 pcp->count++;
Nick Piggin48db57f2006-01-08 01:00:42 -08002653 if (pcp->count >= pcp->high) {
Jason Low4db0c3c2015-04-15 16:14:08 -07002654 unsigned long batch = READ_ONCE(pcp->batch);
Cody P Schafer998d39cb2013-07-03 15:01:32 -07002655 free_pcppages_bulk(zone, batch, pcp);
2656 pcp->count -= batch;
Nick Piggin48db57f2006-01-08 01:00:42 -08002657 }
Mel Gorman9cca35d42017-11-15 17:37:37 -08002658}
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002659
Mel Gorman9cca35d42017-11-15 17:37:37 -08002660/*
2661 * Free a 0-order page
Mel Gorman9cca35d42017-11-15 17:37:37 -08002662 */
Mel Gorman2d4894b2017-11-15 17:37:59 -08002663void free_unref_page(struct page *page)
Mel Gorman9cca35d42017-11-15 17:37:37 -08002664{
2665 unsigned long flags;
2666 unsigned long pfn = page_to_pfn(page);
2667
Mel Gorman2d4894b2017-11-15 17:37:59 -08002668 if (!free_unref_page_prepare(page, pfn))
Mel Gorman9cca35d42017-11-15 17:37:37 -08002669 return;
2670
2671 local_irq_save(flags);
Mel Gorman2d4894b2017-11-15 17:37:59 -08002672 free_unref_page_commit(page, pfn);
Mel Gormand34b0732017-04-20 14:37:43 -07002673 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674}
2675
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002676/*
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002677 * Free a list of 0-order pages
2678 */
Mel Gorman2d4894b2017-11-15 17:37:59 -08002679void free_unref_page_list(struct list_head *list)
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002680{
2681 struct page *page, *next;
Mel Gorman9cca35d42017-11-15 17:37:37 -08002682 unsigned long flags, pfn;
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002683
Mel Gorman9cca35d42017-11-15 17:37:37 -08002684 /* Prepare pages for freeing */
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002685 list_for_each_entry_safe(page, next, list, lru) {
Mel Gorman9cca35d42017-11-15 17:37:37 -08002686 pfn = page_to_pfn(page);
Mel Gorman2d4894b2017-11-15 17:37:59 -08002687 if (!free_unref_page_prepare(page, pfn))
Mel Gorman9cca35d42017-11-15 17:37:37 -08002688 list_del(&page->lru);
2689 set_page_private(page, pfn);
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002690 }
Mel Gorman9cca35d42017-11-15 17:37:37 -08002691
2692 local_irq_save(flags);
2693 list_for_each_entry_safe(page, next, list, lru) {
2694 unsigned long pfn = page_private(page);
2695
2696 set_page_private(page, 0);
Mel Gorman2d4894b2017-11-15 17:37:59 -08002697 trace_mm_page_free_batched(page);
2698 free_unref_page_commit(page, pfn);
Mel Gorman9cca35d42017-11-15 17:37:37 -08002699 }
2700 local_irq_restore(flags);
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002701}
2702
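/*
 * Editor's illustrative sketch (not part of the original file): batching
 * order-0 frees through the pcplists. The helper name is hypothetical and
 * the pages are assumed to be order-0 with no remaining references.
 */
static void example_free_page_batch(struct page **pages, unsigned int nr)
{
	LIST_HEAD(tofree);
	unsigned int i;

	for (i = 0; i < nr; i++)
		list_add(&pages[i]->lru, &tofree);

	/* One IRQ-disabled section commits the whole batch to the pcplists */
	free_unref_page_list(&tofree);
}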
2703/*
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002704 * split_page takes a non-compound higher-order page, and splits it into
2705 * n (1<<order) sub-pages: page[0..n-1].
2706 * Each sub-page must be freed individually.
2707 *
2708 * Note: this is probably too low level an operation for use in drivers.
2709 * Please consult with lkml before using this in your driver.
2710 */
2711void split_page(struct page *page, unsigned int order)
2712{
2713 int i;
2714
Sasha Levin309381fea2014-01-23 15:52:54 -08002715 VM_BUG_ON_PAGE(PageCompound(page), page);
2716 VM_BUG_ON_PAGE(!page_count(page), page);
Vegard Nossumb1eeab62008-11-25 16:55:53 +01002717
Joonsoo Kima9627bc2016-07-26 15:23:49 -07002718 for (i = 1; i < (1 << order); i++)
Nick Piggin7835e982006-03-22 00:08:40 -08002719 set_page_refcounted(page + i);
Joonsoo Kima9627bc2016-07-26 15:23:49 -07002720 split_page_owner(page, order);
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002721}
K. Y. Srinivasan5853ff22013-03-25 15:47:38 -07002722EXPORT_SYMBOL_GPL(split_page);
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002723
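/*
 * Editor's illustrative sketch (not part of the original file): the driver
 * pattern the comment above warns about. A higher-order allocation is
 * split so each sub-page can later be released with __free_page(). The
 * function name is hypothetical.
 */
static struct page *example_alloc_and_split(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL, order);

	if (!page)
		return NULL;

	/* page[0 .. (1 << order) - 1] now each carry their own reference */
	split_page(page, order);
	return page;
}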
Joonsoo Kim3c605092014-11-13 15:19:21 -08002724int __isolate_free_page(struct page *page, unsigned int order)
Mel Gorman748446b2010-05-24 14:32:27 -07002725{
Mel Gorman748446b2010-05-24 14:32:27 -07002726 unsigned long watermark;
2727 struct zone *zone;
Bartlomiej Zolnierkiewicz2139cbe2012-10-08 16:32:00 -07002728 int mt;
Mel Gorman748446b2010-05-24 14:32:27 -07002729
2730 BUG_ON(!PageBuddy(page));
2731
2732 zone = page_zone(page);
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08002733 mt = get_pageblock_migratetype(page);
Mel Gorman748446b2010-05-24 14:32:27 -07002734
Minchan Kim194159f2013-02-22 16:33:58 -08002735 if (!is_migrate_isolate(mt)) {
Vlastimil Babka8348faf2016-10-07 16:58:00 -07002736 /*
2737 * Obey watermarks as if the page was being allocated. We can
2738 * emulate a high-order watermark check with a raised order-0
2739 * watermark, because we already know our high-order page
2740 * exists.
2741 */
2742 watermark = min_wmark_pages(zone) + (1UL << order);
Vlastimil Babka984fdba2016-10-07 16:57:57 -07002743 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08002744 return 0;
2745
Mel Gorman8fb74b92013-01-11 14:32:16 -08002746 __mod_zone_freepage_state(zone, -(1UL << order), mt);
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08002747 }
Mel Gorman748446b2010-05-24 14:32:27 -07002748
2749 /* Remove page from free list */
2750 list_del(&page->lru);
2751 zone->free_area[order].nr_free--;
2752 rmv_page_order(page);
Bartlomiej Zolnierkiewicz2139cbe2012-10-08 16:32:00 -07002753
zhong jiang400bc7f2016-07-28 15:45:07 -07002754 /*
2755 * Set the pageblock's migratetype if the isolated page is at least
2756 * half of a pageblock.
2757 */
Mel Gorman748446b2010-05-24 14:32:27 -07002758 if (order >= pageblock_order - 1) {
2759 struct page *endpage = page + (1 << order) - 1;
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002760 for (; page < endpage; page += pageblock_nr_pages) {
2761 int mt = get_pageblock_migratetype(page);
Minchan Kim88ed3652016-12-12 16:42:05 -08002762 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
Xishi Qiua6ffdc02017-05-03 14:52:52 -07002763 && !is_migrate_highatomic(mt))
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002764 set_pageblock_migratetype(page,
2765 MIGRATE_MOVABLE);
2766 }
Mel Gorman748446b2010-05-24 14:32:27 -07002767 }
2768
Joonsoo Kimf3a14ce2015-07-17 16:24:15 -07002769
Mel Gorman8fb74b92013-01-11 14:32:16 -08002770 return 1UL << order;
Mel Gorman1fb3f8c2012-10-08 16:29:12 -07002771}
2772
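/*
 * Editor's illustrative sketch (not part of the original file): how a
 * scanner such as compaction might pull one free buddy page off the free
 * lists. Holding zone->lock around the call and the helper name are
 * assumptions made for this example.
 */
static unsigned long example_isolate_buddy(struct zone *zone, struct page *page)
{
	unsigned long flags;
	unsigned long isolated = 0;

	spin_lock_irqsave(&zone->lock, flags);
	if (PageBuddy(page))
		isolated = __isolate_free_page(page, page_order(page));
	spin_unlock_irqrestore(&zone->lock, flags);

	return isolated;	/* number of base pages, 0 on watermark failure */
}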
2773/*
Mel Gorman060e7412016-05-19 17:13:27 -07002774 * Update NUMA hit/miss statistics
2775 *
2776 * Must be called with interrupts disabled.
Mel Gorman060e7412016-05-19 17:13:27 -07002777 */
Michal Hocko41b61672017-01-10 16:57:42 -08002778static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
Mel Gorman060e7412016-05-19 17:13:27 -07002779{
2780#ifdef CONFIG_NUMA
Kemi Wang3a321d22017-09-08 16:12:48 -07002781 enum numa_stat_item local_stat = NUMA_LOCAL;
Mel Gorman060e7412016-05-19 17:13:27 -07002782
Michal Hocko2df26632017-01-10 16:57:39 -08002783 if (z->node != numa_node_id())
Mel Gorman060e7412016-05-19 17:13:27 -07002784 local_stat = NUMA_OTHER;
Mel Gorman060e7412016-05-19 17:13:27 -07002785
Michal Hocko2df26632017-01-10 16:57:39 -08002786 if (z->node == preferred_zone->node)
Kemi Wang3a321d22017-09-08 16:12:48 -07002787 __inc_numa_state(z, NUMA_HIT);
Michal Hocko2df26632017-01-10 16:57:39 -08002788 else {
Kemi Wang3a321d22017-09-08 16:12:48 -07002789 __inc_numa_state(z, NUMA_MISS);
2790 __inc_numa_state(preferred_zone, NUMA_FOREIGN);
Mel Gorman060e7412016-05-19 17:13:27 -07002791 }
Kemi Wang3a321d22017-09-08 16:12:48 -07002792 __inc_numa_state(z, local_stat);
Mel Gorman060e7412016-05-19 17:13:27 -07002793#endif
2794}
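/*
 * Editor's worked example (not in the original file): a task running on
 * node 0 prefers a node-0 zone but is handed a page from node 1. Then z is
 * the node-1 zone, so node 1 gets NUMA_MISS and NUMA_OTHER while the
 * preferred node 0 gets NUMA_FOREIGN. Had the page come from node 0, node 0
 * would instead get NUMA_HIT and NUMA_LOCAL.
 */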
2795
Mel Gorman066b2392017-02-24 14:56:26 -08002796/* Remove page from the per-cpu list, caller must protect the list */
2797static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
2798 bool cold, struct per_cpu_pages *pcp,
2799 struct list_head *list)
2800{
2801 struct page *page;
2802
2803 do {
2804 if (list_empty(list)) {
2805 pcp->count += rmqueue_bulk(zone, 0,
2806 pcp->batch, list,
2807 migratetype, cold);
2808 if (unlikely(list_empty(list)))
2809 return NULL;
2810 }
2811
2812 if (cold)
2813 page = list_last_entry(list, struct page, lru);
2814 else
2815 page = list_first_entry(list, struct page, lru);
2816
2817 list_del(&page->lru);
2818 pcp->count--;
2819 } while (check_new_pcp(page));
2820
2821 return page;
2822}
2823
2824/* Lock and remove page from the per-cpu list */
2825static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2826 struct zone *zone, unsigned int order,
2827 gfp_t gfp_flags, int migratetype)
2828{
2829 struct per_cpu_pages *pcp;
2830 struct list_head *list;
2831 bool cold = ((gfp_flags & __GFP_COLD) != 0);
2832 struct page *page;
Mel Gormand34b0732017-04-20 14:37:43 -07002833 unsigned long flags;
Mel Gorman066b2392017-02-24 14:56:26 -08002834
Mel Gormand34b0732017-04-20 14:37:43 -07002835 local_irq_save(flags);
Mel Gorman066b2392017-02-24 14:56:26 -08002836 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2837 list = &pcp->lists[migratetype];
2838 page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
2839 if (page) {
2840 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2841 zone_statistics(preferred_zone, zone);
2842 }
Mel Gormand34b0732017-04-20 14:37:43 -07002843 local_irq_restore(flags);
Mel Gorman066b2392017-02-24 14:56:26 -08002844 return page;
2845}
2846
Mel Gorman060e7412016-05-19 17:13:27 -07002847/*
Vlastimil Babka75379192015-02-11 15:25:38 -08002848 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002849 */
Mel Gorman0a15c3e2009-06-16 15:32:05 -07002850static inline
Mel Gorman066b2392017-02-24 14:56:26 -08002851struct page *rmqueue(struct zone *preferred_zone,
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002852 struct zone *zone, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07002853 gfp_t gfp_flags, unsigned int alloc_flags,
2854 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855{
2856 unsigned long flags;
Hugh Dickins689bceb2005-11-21 21:32:20 -08002857 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858
Mel Gormand34b0732017-04-20 14:37:43 -07002859 if (likely(order == 0)) {
Mel Gorman066b2392017-02-24 14:56:26 -08002860 page = rmqueue_pcplist(preferred_zone, zone, order,
2861 gfp_flags, migratetype);
2862 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863 }
2864
Mel Gorman066b2392017-02-24 14:56:26 -08002865 /*
2866 * We most definitely don't want callers attempting to
2867 * allocate greater than order-1 page units with __GFP_NOFAIL.
2868 */
2869 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
2870 spin_lock_irqsave(&zone->lock, flags);
2871
2872 do {
2873 page = NULL;
2874 if (alloc_flags & ALLOC_HARDER) {
2875 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2876 if (page)
2877 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2878 }
2879 if (!page)
2880 page = __rmqueue(zone, order, migratetype);
2881 } while (page && check_new_pages(page, order));
2882 spin_unlock(&zone->lock);
2883 if (!page)
2884 goto failed;
2885 __mod_zone_freepage_state(zone, -(1 << order),
2886 get_pcppage_migratetype(page));
2887
Mel Gorman16709d12016-07-28 15:46:56 -07002888 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
Michal Hocko41b61672017-01-10 16:57:42 -08002889 zone_statistics(preferred_zone, zone);
Nick Piggina74609f2006-01-06 00:11:20 -08002890 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002891
Mel Gorman066b2392017-02-24 14:56:26 -08002892out:
2893 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894 return page;
Nick Piggina74609f2006-01-06 00:11:20 -08002895
2896failed:
2897 local_irq_restore(flags);
Nick Piggina74609f2006-01-06 00:11:20 -08002898 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899}
2900
Akinobu Mita933e3122006-12-08 02:39:45 -08002901#ifdef CONFIG_FAIL_PAGE_ALLOC
2902
Akinobu Mitab2588c42011-07-26 16:09:03 -07002903static struct {
Akinobu Mita933e3122006-12-08 02:39:45 -08002904 struct fault_attr attr;
2905
Viresh Kumar621a5f72015-09-26 15:04:07 -07002906 bool ignore_gfp_highmem;
Mel Gorman71baba42015-11-06 16:28:28 -08002907 bool ignore_gfp_reclaim;
Akinobu Mita54114992007-07-15 23:40:23 -07002908 u32 min_order;
Akinobu Mita933e3122006-12-08 02:39:45 -08002909} fail_page_alloc = {
2910 .attr = FAULT_ATTR_INITIALIZER,
Mel Gorman71baba42015-11-06 16:28:28 -08002911 .ignore_gfp_reclaim = true,
Viresh Kumar621a5f72015-09-26 15:04:07 -07002912 .ignore_gfp_highmem = true,
Akinobu Mita54114992007-07-15 23:40:23 -07002913 .min_order = 1,
Akinobu Mita933e3122006-12-08 02:39:45 -08002914};
2915
2916static int __init setup_fail_page_alloc(char *str)
2917{
2918 return setup_fault_attr(&fail_page_alloc.attr, str);
2919}
2920__setup("fail_page_alloc=", setup_fail_page_alloc);
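/*
 * Editor's note (illustrative, not in the original file): the string after
 * "fail_page_alloc=" is handed to the generic setup_fault_attr() helper,
 * whose format is believed to be "<interval>,<probability>,<space>,<times>",
 * e.g. fail_page_alloc=1,10,0,-1 for a permanent 10% failure rate; treat the
 * exact syntax as an assumption to verify against the fault-injection
 * documentation. The min-order and ignore-gfp-* knobs are the debugfs files
 * created below.
 */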
2921
Gavin Shandeaf3862012-07-31 16:41:51 -07002922static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
Akinobu Mita933e3122006-12-08 02:39:45 -08002923{
Akinobu Mita54114992007-07-15 23:40:23 -07002924 if (order < fail_page_alloc.min_order)
Gavin Shandeaf3862012-07-31 16:41:51 -07002925 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002926 if (gfp_mask & __GFP_NOFAIL)
Gavin Shandeaf3862012-07-31 16:41:51 -07002927 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002928 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
Gavin Shandeaf3862012-07-31 16:41:51 -07002929 return false;
Mel Gorman71baba42015-11-06 16:28:28 -08002930 if (fail_page_alloc.ignore_gfp_reclaim &&
2931 (gfp_mask & __GFP_DIRECT_RECLAIM))
Gavin Shandeaf3862012-07-31 16:41:51 -07002932 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002933
2934 return should_fail(&fail_page_alloc.attr, 1 << order);
2935}
2936
2937#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2938
2939static int __init fail_page_alloc_debugfs(void)
2940{
Al Virof4ae40a62011-07-24 04:33:43 -04002941 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
Akinobu Mita933e3122006-12-08 02:39:45 -08002942 struct dentry *dir;
Akinobu Mita933e3122006-12-08 02:39:45 -08002943
Akinobu Mitadd48c082011-08-03 16:21:01 -07002944 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2945 &fail_page_alloc.attr);
2946 if (IS_ERR(dir))
2947 return PTR_ERR(dir);
Akinobu Mita933e3122006-12-08 02:39:45 -08002948
Akinobu Mitab2588c42011-07-26 16:09:03 -07002949 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
Mel Gorman71baba42015-11-06 16:28:28 -08002950 &fail_page_alloc.ignore_gfp_reclaim))
Akinobu Mitab2588c42011-07-26 16:09:03 -07002951 goto fail;
2952 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2953 &fail_page_alloc.ignore_gfp_highmem))
2954 goto fail;
2955 if (!debugfs_create_u32("min-order", mode, dir,
2956 &fail_page_alloc.min_order))
2957 goto fail;
Akinobu Mita933e3122006-12-08 02:39:45 -08002958
Akinobu Mitab2588c42011-07-26 16:09:03 -07002959 return 0;
2960fail:
Akinobu Mitadd48c082011-08-03 16:21:01 -07002961 debugfs_remove_recursive(dir);
Akinobu Mita933e3122006-12-08 02:39:45 -08002962
Akinobu Mitab2588c42011-07-26 16:09:03 -07002963 return -ENOMEM;
Akinobu Mita933e3122006-12-08 02:39:45 -08002964}
2965
2966late_initcall(fail_page_alloc_debugfs);
2967
2968#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
2969
2970#else /* CONFIG_FAIL_PAGE_ALLOC */
2971
Gavin Shandeaf3862012-07-31 16:41:51 -07002972static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
Akinobu Mita933e3122006-12-08 02:39:45 -08002973{
Gavin Shandeaf3862012-07-31 16:41:51 -07002974 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002975}
2976
2977#endif /* CONFIG_FAIL_PAGE_ALLOC */
2978
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979/*
Mel Gorman97a16fc2015-11-06 16:28:40 -08002980 * Return true if free base pages are above 'mark'. For high-order checks it
2981 * will return true if the order-0 watermark is reached and there is at least
2982 * one free page of a suitable size. Checking now avoids taking the zone lock
2983 * to check in the allocation paths if no pages are free.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984 */
Michal Hocko86a294a2016-05-20 16:57:12 -07002985bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2986 int classzone_idx, unsigned int alloc_flags,
2987 long free_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988{
Christoph Lameterd23ad422007-02-10 01:43:02 -08002989 long min = mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990 int o;
Michal Hockocd04ae12017-09-06 16:24:50 -07002991 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002993 /* free_pages may go negative - that's OK */
Michal Hockodf0a6da2012-01-10 15:08:02 -08002994 free_pages -= (1 << order) - 1;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002995
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002996 if (alloc_flags & ALLOC_HIGH)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997 min -= min / 2;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002998
2999 /*
3000 * If the caller does not have rights to ALLOC_HARDER then subtract
3001 * the high-atomic reserves. This will over-estimate the size of the
3002 * atomic reserve but it avoids a search.
3003 */
Michal Hockocd04ae12017-09-06 16:24:50 -07003004 if (likely(!alloc_harder)) {
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003005 free_pages -= z->nr_reserved_highatomic;
Michal Hockocd04ae12017-09-06 16:24:50 -07003006 } else {
3007 /*
3008 * OOM victims can try even harder than normal ALLOC_HARDER
3009 * users on the grounds that it's definitely going to be in
3010 * the exit path shortly and free memory. Any allocation it
3011 * makes during the free path will be small and short-lived.
3012 */
3013 if (alloc_flags & ALLOC_OOM)
3014 min -= min / 2;
3015 else
3016 min -= min / 4;
3017 }
3018
Mel Gormane2b19192015-11-06 16:28:09 -08003019
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07003020#ifdef CONFIG_CMA
3021 /* If allocation can't use CMA areas don't use free CMA pages */
3022 if (!(alloc_flags & ALLOC_CMA))
Mel Gorman97a16fc2015-11-06 16:28:40 -08003023 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07003024#endif
Tomasz Stanislawski026b0812013-06-12 14:05:02 -07003025
Mel Gorman97a16fc2015-11-06 16:28:40 -08003026 /*
3027 * Check watermarks for an order-0 allocation request. If these
3028 * are not met, then a high-order request also cannot go ahead
3029 * even if a suitable page happened to be free.
3030 */
3031 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
Mel Gorman88f5acf2011-01-13 15:45:41 -08003032 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033
Mel Gorman97a16fc2015-11-06 16:28:40 -08003034 /* If this is an order-0 request then the watermark is fine */
3035 if (!order)
3036 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037
Mel Gorman97a16fc2015-11-06 16:28:40 -08003038 /* For a high-order request, check at least one suitable page is free */
3039 for (o = order; o < MAX_ORDER; o++) {
3040 struct free_area *area = &z->free_area[o];
3041 int mt;
3042
3043 if (!area->nr_free)
3044 continue;
3045
3046 if (alloc_harder)
3047 return true;
3048
3049 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3050 if (!list_empty(&area->free_list[mt]))
3051 return true;
3052 }
3053
3054#ifdef CONFIG_CMA
3055 if ((alloc_flags & ALLOC_CMA) &&
3056 !list_empty(&area->free_list[MIGRATE_CMA])) {
3057 return true;
3058 }
3059#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003060 }
Mel Gorman97a16fc2015-11-06 16:28:40 -08003061 return false;
Mel Gorman88f5acf2011-01-13 15:45:41 -08003062}
3063
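/*
 * Editor's illustrative sketch (not part of the original file): the
 * effective order-0 cut-off applied above, with the high-atomic, OOM and
 * CMA adjustments left out for brevity. The helper name is hypothetical.
 */
static long example_effective_min(long mark, unsigned int alloc_flags)
{
	long min = mark;

	if (alloc_flags & ALLOC_HIGH)		/* __GFP_HIGH callers */
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)		/* atomic / rt-task callers */
		min -= min / 4;

	/*
	 * E.g. mark = 1024: a plain request must leave more than 1024 free
	 * pages (plus the zone's lowmem_reserve) once its own pages are
	 * discounted, while an ALLOC_HIGH|ALLOC_HARDER request only needs
	 * to leave more than 384.
	 */
	return min;
}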
Mel Gorman7aeb09f2014-06-04 16:10:21 -07003064bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
Mel Gormanc6038442016-05-19 17:13:38 -07003065 int classzone_idx, unsigned int alloc_flags)
Mel Gorman88f5acf2011-01-13 15:45:41 -08003066{
3067 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3068 zone_page_state(z, NR_FREE_PAGES));
3069}
3070
Mel Gorman48ee5f32016-05-19 17:14:07 -07003071static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3072 unsigned long mark, int classzone_idx, unsigned int alloc_flags)
3073{
3074 long free_pages = zone_page_state(z, NR_FREE_PAGES);
3075 long cma_pages = 0;
3076
3077#ifdef CONFIG_CMA
3078 /* If allocation can't use CMA areas don't use free CMA pages */
3079 if (!(alloc_flags & ALLOC_CMA))
3080 cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
3081#endif
3082
3083 /*
3084 * Fast check for order-0 only. If this fails then the reserves
3085 * need to be calculated. There is a corner case where the check
3086 * passes but only the high-order atomic reserves are free. If
3087 * the caller is !atomic then it'll uselessly search the free
3088 * list. That corner case is then slower but it is harmless.
3089 */
3090 if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
3091 return true;
3092
3093 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3094 free_pages);
3095}
3096
Mel Gorman7aeb09f2014-06-04 16:10:21 -07003097bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
Mel Gormane2b19192015-11-06 16:28:09 -08003098 unsigned long mark, int classzone_idx)
Mel Gorman88f5acf2011-01-13 15:45:41 -08003099{
3100 long free_pages = zone_page_state(z, NR_FREE_PAGES);
3101
3102 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3103 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3104
Mel Gormane2b19192015-11-06 16:28:09 -08003105 return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
Mel Gorman88f5acf2011-01-13 15:45:41 -08003106 free_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003107}
3108
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003109#ifdef CONFIG_NUMA
David Rientjes957f8222012-10-08 16:33:24 -07003110static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3111{
Gavin Shane02dc012017-02-24 14:59:33 -08003112 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
Mel Gorman5f7a75a2014-06-04 16:07:15 -07003113 RECLAIM_DISTANCE;
David Rientjes957f8222012-10-08 16:33:24 -07003114}
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003115#else /* CONFIG_NUMA */
David Rientjes957f8222012-10-08 16:33:24 -07003116static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3117{
3118 return true;
3119}
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003120#endif /* CONFIG_NUMA */
3121
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003122/*
Paul Jackson0798e512006-12-06 20:31:38 -08003123 * get_page_from_freelist goes through the zonelist trying to allocate
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003124 * a page.
3125 */
3126static struct page *
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003127get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3128 const struct alloc_context *ac)
Martin Hicks753ee722005-06-21 17:14:41 -07003129{
Mel Gormanc33d6c02016-05-19 17:14:10 -07003130 struct zoneref *z = ac->preferred_zoneref;
Mel Gorman5117f452009-06-16 15:31:59 -07003131 struct zone *zone;
Mel Gorman3b8c0be2016-07-28 15:46:53 -07003132 struct pglist_data *last_pgdat_dirty_limit = NULL;
3133
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003134 /*
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003135 * Scan zonelist, looking for a zone with enough free pages.
Vladimir Davydov344736f2014-10-20 15:50:30 +04003136 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003137 */
Mel Gormanc33d6c02016-05-19 17:14:10 -07003138 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003139 ac->nodemask) {
Mel Gormanbe06af02016-05-19 17:13:47 -07003140 struct page *page;
Johannes Weinere085dbc2013-09-11 14:20:46 -07003141 unsigned long mark;
3142
Mel Gorman664eedd2014-06-04 16:10:08 -07003143 if (cpusets_enabled() &&
3144 (alloc_flags & ALLOC_CPUSET) &&
Vlastimil Babka002f2902016-05-19 17:14:30 -07003145 !__cpuset_zone_allowed(zone, gfp_mask))
Mel Gormancd38b112011-07-25 17:12:29 -07003146 continue;
Johannes Weinera756cf52012-01-10 15:07:49 -08003147 /*
3148 * When allocating a page cache page for writing, we
Mel Gorman281e3722016-07-28 15:46:11 -07003149 * want to get it from a node that is within its dirty
3150 * limit, such that no single node holds more than its
Johannes Weinera756cf52012-01-10 15:07:49 -08003151 * proportional share of globally allowed dirty pages.
Mel Gorman281e3722016-07-28 15:46:11 -07003152 * The dirty limits take into account the node's
Johannes Weinera756cf52012-01-10 15:07:49 -08003153 * lowmem reserves and high watermark so that kswapd
3154 * should be able to balance it without having to
3155 * write pages from its LRU list.
3156 *
Johannes Weinera756cf52012-01-10 15:07:49 -08003157 * XXX: For now, allow allocations to potentially
Mel Gorman281e3722016-07-28 15:46:11 -07003158 * exceed the per-node dirty limit in the slowpath
Mel Gormanc9ab0c42015-11-06 16:28:12 -08003159 * (spread_dirty_pages unset) before going into reclaim,
Johannes Weinera756cf52012-01-10 15:07:49 -08003160 * which is important when on a NUMA setup the allowed
Mel Gorman281e3722016-07-28 15:46:11 -07003161 * nodes are together not big enough to reach the
Johannes Weinera756cf52012-01-10 15:07:49 -08003162 * global limit. The proper fix for these situations
Mel Gorman281e3722016-07-28 15:46:11 -07003163 * will require awareness of nodes in the
Johannes Weinera756cf52012-01-10 15:07:49 -08003164 * dirty-throttling and the flusher threads.
3165 */
Mel Gorman3b8c0be2016-07-28 15:46:53 -07003166 if (ac->spread_dirty_pages) {
3167 if (last_pgdat_dirty_limit == zone->zone_pgdat)
3168 continue;
3169
3170 if (!node_dirty_ok(zone->zone_pgdat)) {
3171 last_pgdat_dirty_limit = zone->zone_pgdat;
3172 continue;
3173 }
3174 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003175
Johannes Weinere085dbc2013-09-11 14:20:46 -07003176 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
Mel Gorman48ee5f32016-05-19 17:14:07 -07003177 if (!zone_watermark_fast(zone, order, mark,
Mel Gorman93ea9962016-05-19 17:14:13 -07003178 ac_classzone_idx(ac), alloc_flags)) {
Mel Gormanfa5e0842009-06-16 15:33:22 -07003179 int ret;
3180
Mel Gorman5dab2912014-06-04 16:10:14 -07003181 /* Checked here to keep the fast path fast */
3182 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3183 if (alloc_flags & ALLOC_NO_WATERMARKS)
3184 goto try_this_zone;
3185
Mel Gormana5f5f912016-07-28 15:46:32 -07003186 if (node_reclaim_mode == 0 ||
Mel Gormanc33d6c02016-05-19 17:14:10 -07003187 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
Mel Gormancd38b112011-07-25 17:12:29 -07003188 continue;
3189
Mel Gormana5f5f912016-07-28 15:46:32 -07003190 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
Mel Gormanfa5e0842009-06-16 15:33:22 -07003191 switch (ret) {
Mel Gormana5f5f912016-07-28 15:46:32 -07003192 case NODE_RECLAIM_NOSCAN:
Mel Gormanfa5e0842009-06-16 15:33:22 -07003193 /* did not scan */
Mel Gormancd38b112011-07-25 17:12:29 -07003194 continue;
Mel Gormana5f5f912016-07-28 15:46:32 -07003195 case NODE_RECLAIM_FULL:
Mel Gormanfa5e0842009-06-16 15:33:22 -07003196 /* scanned but unreclaimable */
Mel Gormancd38b112011-07-25 17:12:29 -07003197 continue;
Mel Gormanfa5e0842009-06-16 15:33:22 -07003198 default:
3199 /* did we reclaim enough */
Mel Gormanfed27192013-04-29 15:07:57 -07003200 if (zone_watermark_ok(zone, order, mark,
Mel Gorman93ea9962016-05-19 17:14:13 -07003201 ac_classzone_idx(ac), alloc_flags))
Mel Gormanfed27192013-04-29 15:07:57 -07003202 goto try_this_zone;
3203
Mel Gormanfed27192013-04-29 15:07:57 -07003204 continue;
Paul Jackson0798e512006-12-06 20:31:38 -08003205 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003206 }
3207
Mel Gormanfa5e0842009-06-16 15:33:22 -07003208try_this_zone:
Mel Gorman066b2392017-02-24 14:56:26 -08003209 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003210 gfp_mask, alloc_flags, ac->migratetype);
Vlastimil Babka75379192015-02-11 15:25:38 -08003211 if (page) {
Mel Gorman479f8542016-05-19 17:14:35 -07003212 prep_new_page(page, order, gfp_mask, alloc_flags);
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003213
3214 /*
3215 * If this is a high-order atomic allocation then check
3216 * if the pageblock should be reserved for the future
3217 */
3218 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3219 reserve_highatomic_pageblock(page, zone, order);
3220
Vlastimil Babka75379192015-02-11 15:25:38 -08003221 return page;
3222 }
Mel Gorman54a6eb52008-04-28 02:12:16 -07003223 }
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003224
Mel Gorman4ffeaf32014-08-06 16:07:22 -07003225 return NULL;
Martin Hicks753ee722005-06-21 17:14:41 -07003226}
3227
David Rientjes29423e772011-03-22 16:30:47 -07003228/*
3229 * Large machines with many possible nodes should not always dump per-node
3230 * meminfo in irq context.
3231 */
3232static inline bool should_suppress_show_mem(void)
3233{
3234 bool ret = false;
3235
3236#if NODES_SHIFT > 8
3237 ret = in_interrupt();
3238#endif
3239 return ret;
3240}
3241
Michal Hocko9af744d2017-02-22 15:46:16 -08003242static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
Dave Hansena238ab52011-05-24 17:12:16 -07003243{
Dave Hansena238ab52011-05-24 17:12:16 -07003244 unsigned int filter = SHOW_MEM_FILTER_NODES;
Michal Hockoaa187502017-02-22 15:41:45 -08003245 static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
Dave Hansena238ab52011-05-24 17:12:16 -07003246
Michal Hockoaa187502017-02-22 15:41:45 -08003247 if (should_suppress_show_mem() || !__ratelimit(&show_mem_rs))
Dave Hansena238ab52011-05-24 17:12:16 -07003248 return;
3249
3250 /*
3251 * This documents exceptions given to allocations in certain
3252 * contexts that are allowed to allocate outside current's set
3253 * of allowed nodes.
3254 */
3255 if (!(gfp_mask & __GFP_NOMEMALLOC))
Michal Hockocd04ae12017-09-06 16:24:50 -07003256 if (tsk_is_oom_victim(current) ||
Dave Hansena238ab52011-05-24 17:12:16 -07003257 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3258 filter &= ~SHOW_MEM_FILTER_NODES;
Mel Gormand0164ad2015-11-06 16:28:21 -08003259 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
Dave Hansena238ab52011-05-24 17:12:16 -07003260 filter &= ~SHOW_MEM_FILTER_NODES;
3261
Michal Hocko9af744d2017-02-22 15:46:16 -08003262 show_mem(filter, nodemask);
Michal Hockoaa187502017-02-22 15:41:45 -08003263}
3264
Michal Hockoa8e99252017-02-22 15:46:10 -08003265void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
Michal Hockoaa187502017-02-22 15:41:45 -08003266{
3267 struct va_format vaf;
3268 va_list args;
3269 static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
3270 DEFAULT_RATELIMIT_BURST);
3271
Tetsuo Handa0f7896f2017-05-03 14:55:34 -07003272 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
Michal Hockoaa187502017-02-22 15:41:45 -08003273 return;
3274
Michal Hocko7877cdc2016-10-07 17:01:55 -07003275 pr_warn("%s: ", current->comm);
Joe Perches3ee9a4f2011-10-31 17:08:35 -07003276
Michal Hocko7877cdc2016-10-07 17:01:55 -07003277 va_start(args, fmt);
3278 vaf.fmt = fmt;
3279 vaf.va = &args;
3280 pr_cont("%pV", &vaf);
3281 va_end(args);
Joe Perches3ee9a4f2011-10-31 17:08:35 -07003282
David Rientjes685dbf62017-02-22 15:46:28 -08003283 pr_cont(", mode:%#x(%pGg), nodemask=", gfp_mask, &gfp_mask);
3284 if (nodemask)
3285 pr_cont("%*pbl\n", nodemask_pr_args(nodemask));
3286 else
3287 pr_cont("(null)\n");
3288
Michal Hockoa8e99252017-02-22 15:46:10 -08003289 cpuset_print_current_mems_allowed();
Joe Perches3ee9a4f2011-10-31 17:08:35 -07003290
Dave Hansena238ab52011-05-24 17:12:16 -07003291 dump_stack();
David Rientjes685dbf62017-02-22 15:46:28 -08003292 warn_alloc_show_mem(gfp_mask, nodemask);
Dave Hansena238ab52011-05-24 17:12:16 -07003293}
3294
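/*
 * Editor's illustrative sketch (not part of the original file): how a
 * caller typically reports a failed allocation through the rate-limited
 * helper above. The wrapper and message text are hypothetical.
 */
static void example_report_alloc_failure(gfp_t gfp_mask, unsigned int order)
{
	warn_alloc(gfp_mask, NULL,
		   "example: page allocation failure: order:%u", order);
}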
Mel Gorman11e33f62009-06-16 15:31:57 -07003295static inline struct page *
Michal Hocko6c18ba72017-02-22 15:46:25 -08003296__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3297 unsigned int alloc_flags,
3298 const struct alloc_context *ac)
3299{
3300 struct page *page;
3301
3302 page = get_page_from_freelist(gfp_mask, order,
3303 alloc_flags|ALLOC_CPUSET, ac);
3304 /*
3305 * Fall back to ignoring the cpuset restriction if our nodes
3306 * are depleted.
3307 */
3308 if (!page)
3309 page = get_page_from_freelist(gfp_mask, order,
3310 alloc_flags, ac);
3311
3312 return page;
3313}
3314
3315static inline struct page *
Mel Gorman11e33f62009-06-16 15:31:57 -07003316__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003317 const struct alloc_context *ac, unsigned long *did_some_progress)
Mel Gorman11e33f62009-06-16 15:31:57 -07003318{
David Rientjes6e0fc462015-09-08 15:00:36 -07003319 struct oom_control oc = {
3320 .zonelist = ac->zonelist,
3321 .nodemask = ac->nodemask,
Vladimir Davydov2a966b72016-07-26 15:22:33 -07003322 .memcg = NULL,
David Rientjes6e0fc462015-09-08 15:00:36 -07003323 .gfp_mask = gfp_mask,
3324 .order = order,
David Rientjes6e0fc462015-09-08 15:00:36 -07003325 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003327
Johannes Weiner9879de72015-01-26 12:58:32 -08003328 *did_some_progress = 0;
3329
Johannes Weiner9879de72015-01-26 12:58:32 -08003330 /*
Johannes Weinerdc564012015-06-24 16:57:19 -07003331 * Acquire the oom lock. If that fails, somebody else is
3332 * making progress for us.
Johannes Weiner9879de72015-01-26 12:58:32 -08003333 */
Johannes Weinerdc564012015-06-24 16:57:19 -07003334 if (!mutex_trylock(&oom_lock)) {
Johannes Weiner9879de72015-01-26 12:58:32 -08003335 *did_some_progress = 1;
Mel Gorman11e33f62009-06-16 15:31:57 -07003336 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337 return NULL;
3338 }
Jens Axboe6b1de912005-11-17 21:35:02 +01003339
Mel Gorman11e33f62009-06-16 15:31:57 -07003340 /*
3341 * Go through the zonelist yet one more time, keeping a very high watermark
3342 * here; this is only to catch a parallel oom killing, and we must fail if
Tetsuo Handae746bf72017-08-31 16:15:20 -07003343 * we're still under heavy pressure. But make sure that this reclaim
3344 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
3345 * allocation which will never fail due to oom_lock already held.
Mel Gorman11e33f62009-06-16 15:31:57 -07003346 */
Tetsuo Handae746bf72017-08-31 16:15:20 -07003347 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3348 ~__GFP_DIRECT_RECLAIM, order,
3349 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003350 if (page)
Mel Gorman11e33f62009-06-16 15:31:57 -07003351 goto out;
3352
Michal Hocko06ad2762017-02-22 15:46:22 -08003353 /* Coredumps can quickly deplete all memory reserves */
3354 if (current->flags & PF_DUMPCORE)
3355 goto out;
3356 /* The OOM killer will not help higher order allocs */
3357 if (order > PAGE_ALLOC_COSTLY_ORDER)
3358 goto out;
Michal Hockodcda9b02017-07-12 14:36:45 -07003359 /*
3360 * We have already exhausted all our reclaim opportunities without any
3361 * success so it is time to admit defeat. We will skip the OOM killer
3362 * because it is very likely that the caller has a more reasonable
3363 * fallback than shooting a random task.
3364 */
3365 if (gfp_mask & __GFP_RETRY_MAYFAIL)
3366 goto out;
Michal Hocko06ad2762017-02-22 15:46:22 -08003367 /* The OOM killer does not needlessly kill tasks for lowmem */
3368 if (ac->high_zoneidx < ZONE_NORMAL)
3369 goto out;
3370 if (pm_suspended_storage())
3371 goto out;
3372 /*
3373 * XXX: GFP_NOFS allocations should rather fail than rely on
3374 * other requests to make forward progress.
3375 * We are in an unfortunate situation where out_of_memory cannot
3376 * do much for this context but let's try it to at least get
3377 * access to memory reserved if the current task is killed (see
3378 * out_of_memory). Once filesystems are ready to handle allocation
3379 * failures more gracefully we should just bail out here.
3380 */
Michal Hocko3da88fb32016-05-19 17:13:09 -07003381
Michal Hocko06ad2762017-02-22 15:46:22 -08003382 /* The OOM killer may not free memory on a specific node */
3383 if (gfp_mask & __GFP_THISNODE)
3384 goto out;
3385
Mel Gorman11e33f62009-06-16 15:31:57 -07003386 /* Exhausted what can be done so it's blamo time */
Michal Hocko5020e282016-01-14 15:20:36 -08003387 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
Michal Hockoc32b3cb2015-02-11 15:26:24 -08003388 *did_some_progress = 1;
Michal Hocko5020e282016-01-14 15:20:36 -08003389
Michal Hocko6c18ba72017-02-22 15:46:25 -08003390 /*
3391 * Help non-failing allocations by giving them access to memory
3392 * reserves
3393 */
3394 if (gfp_mask & __GFP_NOFAIL)
3395 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
Michal Hocko5020e282016-01-14 15:20:36 -08003396 ALLOC_NO_WATERMARKS, ac);
Michal Hocko5020e282016-01-14 15:20:36 -08003397 }
Mel Gorman11e33f62009-06-16 15:31:57 -07003398out:
Johannes Weinerdc564012015-06-24 16:57:19 -07003399 mutex_unlock(&oom_lock);
Mel Gorman11e33f62009-06-16 15:31:57 -07003400 return page;
3401}
3402
Michal Hocko33c2d212016-05-20 16:57:06 -07003403/*
3404 * Maximum number of compaction retries with progress before the OOM
3405 * killer is considered the only way to move forward.
3406 */
3407#define MAX_COMPACT_RETRIES 16
3408
Mel Gorman56de7262010-05-24 14:32:30 -07003409#ifdef CONFIG_COMPACTION
3410/* Try memory compaction for high-order allocations before reclaim */
3411static struct page *
3412__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07003413 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003414 enum compact_priority prio, enum compact_result *compact_result)
Mel Gorman56de7262010-05-24 14:32:30 -07003415{
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003416 struct page *page;
Vlastimil Babka499118e2017-05-08 15:59:50 -07003417 unsigned int noreclaim_flag;
Vlastimil Babka53853e22014-10-09 15:27:02 -07003418
Mel Gorman66199712012-01-12 17:19:41 -08003419 if (!order)
Mel Gorman56de7262010-05-24 14:32:30 -07003420 return NULL;
3421
Vlastimil Babka499118e2017-05-08 15:59:50 -07003422 noreclaim_flag = memalloc_noreclaim_save();
Michal Hockoc5d01d02016-05-20 16:56:53 -07003423 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
Vlastimil Babkac3486f52016-07-28 15:49:30 -07003424 prio);
Vlastimil Babka499118e2017-05-08 15:59:50 -07003425 memalloc_noreclaim_restore(noreclaim_flag);
Mel Gorman56de7262010-05-24 14:32:30 -07003426
Michal Hockoc5d01d02016-05-20 16:56:53 -07003427 if (*compact_result <= COMPACT_INACTIVE)
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003428 return NULL;
Mel Gorman56de7262010-05-24 14:32:30 -07003429
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003430 /*
3431 * In at least one zone, compaction wasn't deferred or skipped, so let's
3432 * count a compaction stall
3433 */
3434 count_vm_event(COMPACTSTALL);
3435
Vlastimil Babka31a6c192016-07-28 15:49:13 -07003436 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003437
3438 if (page) {
3439 struct zone *zone = page_zone(page);
3440
3441 zone->compact_blockskip_flush = false;
3442 compaction_defer_reset(zone, order, true);
3443 count_vm_event(COMPACTSUCCESS);
3444 return page;
3445 }
3446
3447 /*
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003448 * It's bad if a compaction run occurs and fails. The most likely reason
3449 * is that pages exist, but not enough to satisfy watermarks.
3450 */
3451 count_vm_event(COMPACTFAIL);
3452
3453 cond_resched();
3454
Mel Gorman56de7262010-05-24 14:32:30 -07003455 return NULL;
3456}
Michal Hocko33c2d212016-05-20 16:57:06 -07003457
Vlastimil Babka32508452016-10-07 17:00:28 -07003458static inline bool
3459should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3460 enum compact_result compact_result,
3461 enum compact_priority *compact_priority,
Vlastimil Babkad9436492016-10-07 17:00:31 -07003462 int *compaction_retries)
Vlastimil Babka32508452016-10-07 17:00:28 -07003463{
3464 int max_retries = MAX_COMPACT_RETRIES;
Vlastimil Babkac2033b02016-10-07 17:00:34 -07003465 int min_priority;
Michal Hocko65190cf2017-02-22 15:42:03 -08003466 bool ret = false;
3467 int retries = *compaction_retries;
3468 enum compact_priority priority = *compact_priority;
Vlastimil Babka32508452016-10-07 17:00:28 -07003469
3470 if (!order)
3471 return false;
3472
Vlastimil Babkad9436492016-10-07 17:00:31 -07003473 if (compaction_made_progress(compact_result))
3474 (*compaction_retries)++;
3475
Vlastimil Babka32508452016-10-07 17:00:28 -07003476 /*
3477 * compaction considers all the zones as desperately out of memory
3478 * so it doesn't really make much sense to retry except when the
3479 * failure could be caused by insufficient priority
3480 */
Vlastimil Babkad9436492016-10-07 17:00:31 -07003481 if (compaction_failed(compact_result))
3482 goto check_priority;
Vlastimil Babka32508452016-10-07 17:00:28 -07003483
3484 /*
3485 * Make sure the compaction wasn't deferred and didn't bail out early
3486 * due to lock contention before we declare that we should give up.
3487 * But do not retry if the given zonelist is not suitable for
3488 * compaction.
3489 */
Michal Hocko65190cf2017-02-22 15:42:03 -08003490 if (compaction_withdrawn(compact_result)) {
3491 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3492 goto out;
3493 }
Vlastimil Babka32508452016-10-07 17:00:28 -07003494
3495 /*
Michal Hockodcda9b02017-07-12 14:36:45 -07003496 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
Vlastimil Babka32508452016-10-07 17:00:28 -07003497 * costly ones because they are de facto nofail and invoke the OOM
3498 * killer to move on, while costly ones can fail and users are ready
3499 * to cope with that. 1/4 retries is rather arbitrary but we
3500 * would need much more detailed feedback from compaction to
3501 * make a better decision.
3502 */
3503 if (order > PAGE_ALLOC_COSTLY_ORDER)
3504 max_retries /= 4;
Michal Hocko65190cf2017-02-22 15:42:03 -08003505 if (*compaction_retries <= max_retries) {
3506 ret = true;
3507 goto out;
3508 }
Vlastimil Babka32508452016-10-07 17:00:28 -07003509
Vlastimil Babkad9436492016-10-07 17:00:31 -07003510 /*
3511 * Make sure there are attempts at the highest priority if we exhausted
3512 * all retries or failed at the lower priorities.
3513 */
3514check_priority:
Vlastimil Babkac2033b02016-10-07 17:00:34 -07003515 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3516 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
Michal Hocko65190cf2017-02-22 15:42:03 -08003517
Vlastimil Babkac2033b02016-10-07 17:00:34 -07003518 if (*compact_priority > min_priority) {
Vlastimil Babkad9436492016-10-07 17:00:31 -07003519 (*compact_priority)--;
3520 *compaction_retries = 0;
Michal Hocko65190cf2017-02-22 15:42:03 -08003521 ret = true;
Vlastimil Babkad9436492016-10-07 17:00:31 -07003522 }
Michal Hocko65190cf2017-02-22 15:42:03 -08003523out:
3524 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3525 return ret;
Vlastimil Babka32508452016-10-07 17:00:28 -07003526}
Mel Gorman56de7262010-05-24 14:32:30 -07003527#else
3528static inline struct page *
3529__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07003530 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003531 enum compact_priority prio, enum compact_result *compact_result)
Mel Gorman56de7262010-05-24 14:32:30 -07003532{
Michal Hocko33c2d212016-05-20 16:57:06 -07003533 *compact_result = COMPACT_SKIPPED;
Mel Gorman56de7262010-05-24 14:32:30 -07003534 return NULL;
3535}
Michal Hocko33c2d212016-05-20 16:57:06 -07003536
3537static inline bool
Michal Hocko86a294a2016-05-20 16:57:12 -07003538should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3539 enum compact_result compact_result,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003540 enum compact_priority *compact_priority,
Vlastimil Babkad9436492016-10-07 17:00:31 -07003541 int *compaction_retries)
Michal Hocko33c2d212016-05-20 16:57:06 -07003542{
Michal Hocko31e49bf2016-05-20 16:57:15 -07003543 struct zone *zone;
3544 struct zoneref *z;
3545
3546 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3547 return false;
3548
3549 /*
3550 * There are setups with compaction disabled which would prefer to loop
3551 * inside the allocator rather than hit the oom killer prematurely.
3552 * Let's give them a good hope and keep retrying while the order-0
3553 * watermarks are OK.
3554 */
3555 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3556 ac->nodemask) {
3557 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3558 ac_classzone_idx(ac), alloc_flags))
3559 return true;
3560 }
Michal Hocko33c2d212016-05-20 16:57:06 -07003561 return false;
3562}
Vlastimil Babka32508452016-10-07 17:00:28 -07003563#endif /* CONFIG_COMPACTION */
Mel Gorman56de7262010-05-24 14:32:30 -07003564
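/*
 * Editor's illustrative sketch (not part of the original file): the shape
 * of the retry loop the allocator slowpath builds around the two helpers
 * above, with the reclaim and OOM steps that the real slowpath interleaves
 * left out. The function name is hypothetical.
 */
static struct page *example_compact_loop(gfp_t gfp_mask, unsigned int order,
					 unsigned int alloc_flags,
					 struct alloc_context *ac)
{
	enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
	enum compact_result compact_result;
	int compaction_retries = 0;
	struct page *page;

	do {
		page = __alloc_pages_direct_compact(gfp_mask, order,
						    alloc_flags, ac,
						    compact_priority,
						    &compact_result);
		if (page)
			return page;
	} while (should_compact_retry(ac, order, alloc_flags, compact_result,
				      &compact_priority,
				      &compaction_retries));

	return NULL;
}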
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003565#ifdef CONFIG_LOCKDEP
3566struct lockdep_map __fs_reclaim_map =
3567 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
3568
3569static bool __need_fs_reclaim(gfp_t gfp_mask)
3570{
3571 gfp_mask = current_gfp_context(gfp_mask);
3572
3573 /* no reclaim without waiting on it */
3574 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
3575 return false;
3576
3577 /* this guy won't enter reclaim */
3578 if ((current->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
3579 return false;
3580
3581 /* We're only interested __GFP_FS allocations for now */
3582	/* We're only interested in __GFP_FS allocations for now */
3583 return false;
3584
3585 if (gfp_mask & __GFP_NOLOCKDEP)
3586 return false;
3587
3588 return true;
3589}
3590
3591void fs_reclaim_acquire(gfp_t gfp_mask)
3592{
3593 if (__need_fs_reclaim(gfp_mask))
3594 lock_map_acquire(&__fs_reclaim_map);
3595}
3596EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
3597
3598void fs_reclaim_release(gfp_t gfp_mask)
3599{
3600 if (__need_fs_reclaim(gfp_mask))
3601 lock_map_release(&__fs_reclaim_map);
3602}
3603EXPORT_SYMBOL_GPL(fs_reclaim_release);
3604#endif
3605
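/*
 * Editor's illustrative sketch (not part of the original file): a caller
 * can pair the two annotations above as a lockdep-only marker meaning
 * "this context may enter fs reclaim for this gfp mask", so taking the
 * same lock both here and from reclaim is reported. The helper name is
 * hypothetical.
 */
static inline void example_might_enter_fs_reclaim(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);
}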
Marek Szyprowskibba90712012-01-25 12:09:52 +01003606/* Perform direct synchronous page reclaim */
3607static int
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003608__perform_reclaim(gfp_t gfp_mask, unsigned int order,
3609 const struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07003610{
Mel Gorman11e33f62009-06-16 15:31:57 -07003611 struct reclaim_state reclaim_state;
Marek Szyprowskibba90712012-01-25 12:09:52 +01003612 int progress;
Vlastimil Babka499118e2017-05-08 15:59:50 -07003613 unsigned int noreclaim_flag;
Mel Gorman11e33f62009-06-16 15:31:57 -07003614
3615 cond_resched();
3616
3617 /* We now go into synchronous reclaim */
3618 cpuset_memory_pressure_bump();
Vlastimil Babka499118e2017-05-08 15:59:50 -07003619 noreclaim_flag = memalloc_noreclaim_save();
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003620 fs_reclaim_acquire(gfp_mask);
Mel Gorman11e33f62009-06-16 15:31:57 -07003621 reclaim_state.reclaimed_slab = 0;
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003622 current->reclaim_state = &reclaim_state;
Mel Gorman11e33f62009-06-16 15:31:57 -07003623
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003624 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3625 ac->nodemask);
Mel Gorman11e33f62009-06-16 15:31:57 -07003626
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003627 current->reclaim_state = NULL;
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003628 fs_reclaim_release(gfp_mask);
Vlastimil Babka499118e2017-05-08 15:59:50 -07003629 memalloc_noreclaim_restore(noreclaim_flag);
Mel Gorman11e33f62009-06-16 15:31:57 -07003630
3631 cond_resched();
3632
Marek Szyprowskibba90712012-01-25 12:09:52 +01003633 return progress;
3634}
3635
3636/* The really slow allocator path where we enter direct reclaim */
3637static inline struct page *
3638__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07003639 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003640 unsigned long *did_some_progress)
Marek Szyprowskibba90712012-01-25 12:09:52 +01003641{
3642 struct page *page = NULL;
3643 bool drained = false;
3644
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003645 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
Mel Gorman9ee493c2010-09-09 16:38:18 -07003646 if (unlikely(!(*did_some_progress)))
3647 return NULL;
Mel Gorman11e33f62009-06-16 15:31:57 -07003648
Mel Gorman9ee493c2010-09-09 16:38:18 -07003649retry:
Vlastimil Babka31a6c192016-07-28 15:49:13 -07003650 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
Mel Gorman9ee493c2010-09-09 16:38:18 -07003651
3652 /*
3653 * If an allocation failed after direct reclaim, it could be because
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003654 * pages are pinned on the per-cpu lists or in high alloc reserves.
3655 * Shrink them and try again.
Mel Gorman9ee493c2010-09-09 16:38:18 -07003656 */
3657 if (!page && !drained) {
Minchan Kim29fac032016-12-12 16:42:14 -08003658 unreserve_highatomic_pageblock(ac, false);
Vlastimil Babka93481ff2014-12-10 15:43:01 -08003659 drain_all_pages(NULL);
Mel Gorman9ee493c2010-09-09 16:38:18 -07003660 drained = true;
3661 goto retry;
3662 }
3663
Mel Gorman11e33f62009-06-16 15:31:57 -07003664 return page;
3665}
3666
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003667static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07003668{
3669 struct zoneref *z;
3670 struct zone *zone;
Mel Gormane1a55632016-07-28 15:46:26 -07003671 pg_data_t *last_pgdat = NULL;
Mel Gorman11e33f62009-06-16 15:31:57 -07003672
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003673 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
Mel Gormane1a55632016-07-28 15:46:26 -07003674 ac->high_zoneidx, ac->nodemask) {
3675 if (last_pgdat != zone->zone_pgdat)
Mel Gorman52e9f872016-07-28 15:46:29 -07003676 wakeup_kswapd(zone, order, ac->high_zoneidx);
Mel Gormane1a55632016-07-28 15:46:26 -07003677 last_pgdat = zone->zone_pgdat;
3678 }
Mel Gorman11e33f62009-06-16 15:31:57 -07003679}
3680
Mel Gormanc6038442016-05-19 17:13:38 -07003681static inline unsigned int
Peter Zijlstra341ce062009-06-16 15:32:02 -07003682gfp_to_alloc_flags(gfp_t gfp_mask)
3683{
Mel Gormanc6038442016-05-19 17:13:38 -07003684 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
Peter Zijlstra341ce062009-06-16 15:32:02 -07003685
Mel Gormana56f57f2009-06-16 15:32:02 -07003686 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
Namhyung Kime6223a32010-10-26 14:21:59 -07003687 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
Mel Gormana56f57f2009-06-16 15:32:02 -07003688
Peter Zijlstra341ce062009-06-16 15:32:02 -07003689 /*
3690 * The caller may dip into page reserves a bit more if the caller
3691 * cannot run direct reclaim, or if the caller has a realtime scheduling
3692 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
Mel Gormand0164ad2015-11-06 16:28:21 -08003693 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
Peter Zijlstra341ce062009-06-16 15:32:02 -07003694 */
Namhyung Kime6223a32010-10-26 14:21:59 -07003695 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
Peter Zijlstra341ce062009-06-16 15:32:02 -07003696
Mel Gormand0164ad2015-11-06 16:28:21 -08003697 if (gfp_mask & __GFP_ATOMIC) {
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08003698 /*
David Rientjesb104a352014-07-30 16:08:24 -07003699 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
3700 * if it can't schedule.
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08003701 */
David Rientjesb104a352014-07-30 16:08:24 -07003702 if (!(gfp_mask & __GFP_NOMEMALLOC))
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08003703 alloc_flags |= ALLOC_HARDER;
Peter Zijlstra341ce062009-06-16 15:32:02 -07003704 /*
David Rientjesb104a352014-07-30 16:08:24 -07003705 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
Vladimir Davydov344736f2014-10-20 15:50:30 +04003706 * comment for __cpuset_node_allowed().
Peter Zijlstra341ce062009-06-16 15:32:02 -07003707 */
3708 alloc_flags &= ~ALLOC_CPUSET;
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003709 } else if (unlikely(rt_task(current)) && !in_interrupt())
Peter Zijlstra341ce062009-06-16 15:32:02 -07003710 alloc_flags |= ALLOC_HARDER;
3711
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07003712#ifdef CONFIG_CMA
David Rientjes43e7a342014-10-09 15:27:25 -07003713 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07003714 alloc_flags |= ALLOC_CMA;
3715#endif
Peter Zijlstra341ce062009-06-16 15:32:02 -07003716 return alloc_flags;
3717}
3718
Michal Hockocd04ae12017-09-06 16:24:50 -07003719static bool oom_reserves_allowed(struct task_struct *tsk)
Mel Gorman072bb0a2012-07-31 16:43:58 -07003720{
Michal Hockocd04ae12017-09-06 16:24:50 -07003721 if (!tsk_is_oom_victim(tsk))
Vlastimil Babka31a6c192016-07-28 15:49:13 -07003722 return false;
3723
Michal Hockocd04ae12017-09-06 16:24:50 -07003724 /*
3725 * !MMU doesn't have an oom reaper, so give access to memory reserves
3726 * only to the thread with TIF_MEMDIE set
3727 */
3728 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
3729 return false;
Vlastimil Babka31a6c192016-07-28 15:49:13 -07003730
Michal Hockocd04ae12017-09-06 16:24:50 -07003731 return true;
3732}
3733
3734/*
3735 * Distinguish requests which really need access to full memory
3736 * reserves from oom victims which can live with a portion of it
3737 */
3738static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
3739{
3740 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3741 return 0;
3742 if (gfp_mask & __GFP_MEMALLOC)
3743 return ALLOC_NO_WATERMARKS;
3744 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3745 return ALLOC_NO_WATERMARKS;
3746 if (!in_interrupt()) {
3747 if (current->flags & PF_MEMALLOC)
3748 return ALLOC_NO_WATERMARKS;
3749 else if (oom_reserves_allowed(current))
3750 return ALLOC_OOM;
3751 }
3752
3753 return 0;
3754}
3755
3756bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3757{
3758 return !!__gfp_pfmemalloc_flags(gfp_mask);
Mel Gorman072bb0a2012-07-31 16:43:58 -07003759}
3760
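/*
 * Quick reference for the mapping implemented by __gfp_pfmemalloc_flags()
 * above (descriptive only, added for readability):
 *
 *   __GFP_NOMEMALLOC                        -> 0 (never touch reserves)
 *   __GFP_MEMALLOC                          -> ALLOC_NO_WATERMARKS
 *   serving softirq with PF_MEMALLOC        -> ALLOC_NO_WATERMARKS
 *   task context with PF_MEMALLOC           -> ALLOC_NO_WATERMARKS
 *   task context, allowed oom victim        -> ALLOC_OOM
 *   everything else                         -> 0
 */
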
Michal Hocko0a0337e2016-05-20 16:57:00 -07003761/*
Michal Hocko0a0337e2016-05-20 16:57:00 -07003762 * Checks whether it makes sense to retry the reclaim to make forward progress
3763 * for the given allocation request.
Johannes Weiner491d79a2017-05-03 14:52:16 -07003764 *
3765 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
3766 * without success, or when we couldn't even meet the watermark if we
3767 * reclaimed all remaining pages on the LRU lists.
Michal Hocko0a0337e2016-05-20 16:57:00 -07003768 *
3769 * Returns true if a retry is viable or false to enter the oom path.
3770 */
3771static inline bool
3772should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3773 struct alloc_context *ac, int alloc_flags,
Vlastimil Babka423b4522016-10-07 17:00:40 -07003774 bool did_some_progress, int *no_progress_loops)
Michal Hocko0a0337e2016-05-20 16:57:00 -07003775{
3776 struct zone *zone;
3777 struct zoneref *z;
3778
3779 /*
Vlastimil Babka423b4522016-10-07 17:00:40 -07003780 * Costly allocations might have made some progress but this doesn't mean
3781 * their order will become available due to high fragmentation so
3782 * always increment the no progress counter for them
3783 */
3784 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3785 *no_progress_loops = 0;
3786 else
3787 (*no_progress_loops)++;
3788
3789 /*
Michal Hocko0a0337e2016-05-20 16:57:00 -07003790 * Make sure we converge to OOM if we cannot make any progress
3791 * several times in a row.
3792 */
Minchan Kim04c87162016-12-12 16:42:11 -08003793 if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
3794 /* Before OOM, exhaust highatomic_reserve */
Minchan Kim29fac032016-12-12 16:42:14 -08003795 return unreserve_highatomic_pageblock(ac, true);
Minchan Kim04c87162016-12-12 16:42:11 -08003796 }
Michal Hocko0a0337e2016-05-20 16:57:00 -07003797
Michal Hocko0a0337e2016-05-20 16:57:00 -07003798 /*
Mel Gormanbca67592016-07-28 15:47:05 -07003799 * Keep reclaiming pages while there is a chance this will lead
3800 * somewhere. If none of the target zones can satisfy our allocation
3801 * request even if all reclaimable pages are considered then we are
3802 * screwed and have to go OOM.
Michal Hocko0a0337e2016-05-20 16:57:00 -07003803 */
3804 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3805 ac->nodemask) {
3806 unsigned long available;
Michal Hockoede37712016-05-20 16:57:03 -07003807 unsigned long reclaimable;
Michal Hockod379f012017-02-22 15:42:00 -08003808 unsigned long min_wmark = min_wmark_pages(zone);
3809 bool wmark;
Michal Hocko0a0337e2016-05-20 16:57:00 -07003810
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003811 available = reclaimable = zone_reclaimable_pages(zone);
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003812 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
Michal Hocko0a0337e2016-05-20 16:57:00 -07003813
3814 /*
Johannes Weiner491d79a2017-05-03 14:52:16 -07003815 * Would the allocation succeed if we reclaimed all
3816 * reclaimable pages?
Michal Hocko0a0337e2016-05-20 16:57:00 -07003817 */
Michal Hockod379f012017-02-22 15:42:00 -08003818 wmark = __zone_watermark_ok(zone, order, min_wmark,
3819 ac_classzone_idx(ac), alloc_flags, available);
3820 trace_reclaim_retry_zone(z, order, reclaimable,
3821 available, min_wmark, *no_progress_loops, wmark);
3822 if (wmark) {
Michal Hockoede37712016-05-20 16:57:03 -07003823 /*
3824 * If we didn't make any progress and have a lot of
3825 * dirty + writeback pages then we should wait for
3826 * an IO to complete to slow down the reclaim and
3827 * prevent a premature OOM
3828 */
3829 if (!did_some_progress) {
Mel Gorman11fb9982016-07-28 15:46:20 -07003830 unsigned long write_pending;
Michal Hockoede37712016-05-20 16:57:03 -07003831
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003832 write_pending = zone_page_state_snapshot(zone,
3833 NR_ZONE_WRITE_PENDING);
Michal Hockoede37712016-05-20 16:57:03 -07003834
Mel Gorman11fb9982016-07-28 15:46:20 -07003835 if (2 * write_pending > reclaimable) {
Michal Hockoede37712016-05-20 16:57:03 -07003836 congestion_wait(BLK_RW_ASYNC, HZ/10);
3837 return true;
3838 }
3839 }
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003840
Michal Hockoede37712016-05-20 16:57:03 -07003841 /*
3842 * Memory allocation/reclaim might be called from a WQ
3843 * context and the current implementation of the WQ
3844 * concurrency control doesn't recognize that
3845 * a particular WQ is congested if the worker thread is
3846 * looping without ever sleeping. Therefore we have to
3847 * do a short sleep here rather than calling
3848 * cond_resched().
3849 */
3850 if (current->flags & PF_WQ_WORKER)
3851 schedule_timeout_uninterruptible(1);
3852 else
3853 cond_resched();
3854
Michal Hocko0a0337e2016-05-20 16:57:00 -07003855 return true;
3856 }
3857 }
3858
3859 return false;
3860}
3861
Vlastimil Babka902b6282017-07-06 15:39:56 -07003862static inline bool
3863check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
3864{
3865 /*
3866 * It's possible that cpuset's mems_allowed and the nodemask from
3867 * mempolicy don't intersect. This should normally be dealt with by
3868 * policy_nodemask(), but it's possible to race with a cpuset update in
3869 * such a way that the check therein was true, and then it became false
3870 * before we got our cpuset_mems_cookie here.
3871 * This assumes that for all allocations, ac->nodemask can come only
3872 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
3873 * when it does not intersect with the cpuset restrictions) or the
3874 * caller can deal with a violated nodemask.
3875 */
3876 if (cpusets_enabled() && ac->nodemask &&
3877 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
3878 ac->nodemask = NULL;
3879 return true;
3880 }
3881
3882 /*
3883 * When updating a task's mems_allowed or mempolicy nodemask, it is
3884 * possible to race with parallel threads in such a way that our
3885 * allocation can fail while the mask is being updated. If we are about
3886 * to fail, check if the cpuset changed during allocation and if so,
3887 * retry.
3888 */
3889 if (read_mems_allowed_retry(cpuset_mems_cookie))
3890 return true;
3891
3892 return false;
3893}
3894
Mel Gorman11e33f62009-06-16 15:31:57 -07003895static inline struct page *
3896__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003897 struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07003898{
Mel Gormand0164ad2015-11-06 16:28:21 -08003899 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
Vlastimil Babka282722b2017-05-08 15:54:49 -07003900 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
Mel Gorman11e33f62009-06-16 15:31:57 -07003901 struct page *page = NULL;
Mel Gormanc6038442016-05-19 17:13:38 -07003902 unsigned int alloc_flags;
Mel Gorman11e33f62009-06-16 15:31:57 -07003903 unsigned long did_some_progress;
Vlastimil Babka5ce9bfe2017-01-24 15:18:38 -08003904 enum compact_priority compact_priority;
Michal Hockoc5d01d02016-05-20 16:56:53 -07003905 enum compact_result compact_result;
Vlastimil Babka5ce9bfe2017-01-24 15:18:38 -08003906 int compaction_retries;
3907 int no_progress_loops;
Michal Hocko63f53de2016-10-07 17:01:58 -07003908 unsigned long alloc_start = jiffies;
3909 unsigned int stall_timeout = 10 * HZ;
Vlastimil Babka5ce9bfe2017-01-24 15:18:38 -08003910 unsigned int cpuset_mems_cookie;
Michal Hockocd04ae12017-09-06 16:24:50 -07003911 int reserve_flags;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003912
Christoph Lameter952f3b52006-12-06 20:33:26 -08003913 /*
Mel Gorman72807a72009-06-16 15:32:18 -07003914 * In the slowpath, we sanity check order to avoid ever trying to
3915 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
3916 * be using allocators in order of preference for an area that is
3917 * too large.
3918 */
Mel Gorman1fc28b72009-07-29 15:04:08 -07003919 if (order >= MAX_ORDER) {
3920 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
Mel Gorman72807a72009-06-16 15:32:18 -07003921 return NULL;
Mel Gorman1fc28b72009-07-29 15:04:08 -07003922 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003923
Christoph Lameter952f3b52006-12-06 20:33:26 -08003924 /*
Mel Gormand0164ad2015-11-06 16:28:21 -08003925 * We also sanity check to catch abuse of atomic reserves being used by
3926 * callers that are not in atomic context.
3927 */
3928 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
3929 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3930 gfp_mask &= ~__GFP_ATOMIC;
3931
Vlastimil Babka5ce9bfe2017-01-24 15:18:38 -08003932retry_cpuset:
3933 compaction_retries = 0;
3934 no_progress_loops = 0;
3935 compact_priority = DEF_COMPACT_PRIORITY;
3936 cpuset_mems_cookie = read_mems_allowed_begin();
Michal Hocko9a67f642017-02-22 15:46:19 -08003937
3938 /*
3939 * The fast path uses conservative alloc_flags to succeed only until
3940 * kswapd needs to be woken up, and to avoid the cost of setting up
3941 * alloc_flags precisely. So we do that now.
3942 */
3943 alloc_flags = gfp_to_alloc_flags(gfp_mask);
3944
Vlastimil Babkae47483b2017-01-24 15:18:41 -08003945 /*
3946 * We need to recalculate the starting point for the zonelist iterator
3947 * because we might have used different nodemask in the fast path, or
3948 * there was a cpuset modification and we are retrying - otherwise we
3949 * could end up iterating over non-eligible zones endlessly.
3950 */
3951 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3952 ac->high_zoneidx, ac->nodemask);
3953 if (!ac->preferred_zoneref->zone)
3954 goto nopage;
3955
Mel Gormand0164ad2015-11-06 16:28:21 -08003956 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003957 wake_all_kswapds(order, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003958
Paul Jackson9bf22292005-09-06 15:18:12 -07003959 /*
Vlastimil Babka23771232016-07-28 15:49:16 -07003960 * The adjusted alloc_flags might result in immediate success, so try
3961 * that first
3962 */
3963 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3964 if (page)
3965 goto got_pg;
3966
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003967 /*
3968 * For costly allocations, try direct compaction first, as it's likely
Vlastimil Babka282722b2017-05-08 15:54:49 -07003969 * that we have enough base pages and don't need to reclaim. For non-
3970 * movable high-order allocations, do that as well, as compaction will
3971 * try to prevent permanent fragmentation by migrating from blocks of the
3972 * same migratetype.
3973 * Don't try this for allocations that are allowed to ignore
3974 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003975 */
Vlastimil Babka282722b2017-05-08 15:54:49 -07003976 if (can_direct_reclaim &&
3977 (costly_order ||
3978 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
3979 && !gfp_pfmemalloc_allowed(gfp_mask)) {
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003980 page = __alloc_pages_direct_compact(gfp_mask, order,
3981 alloc_flags, ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003982 INIT_COMPACT_PRIORITY,
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003983 &compact_result);
3984 if (page)
3985 goto got_pg;
3986
Vlastimil Babka3eb27712016-07-28 15:49:22 -07003987 /*
3988 * Checks for costly allocations with __GFP_NORETRY, which
3989 * includes THP page fault allocations
3990 */
Vlastimil Babka282722b2017-05-08 15:54:49 -07003991 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003992 /*
3993 * If compaction is deferred for high-order allocations,
3994 * it is because sync compaction recently failed. If
3995 * this is the case and the caller requested a THP
3996 * allocation, we do not want to heavily disrupt the
3997 * system, so we fail the allocation instead of entering
3998 * direct reclaim.
3999 */
4000 if (compact_result == COMPACT_DEFERRED)
4001 goto nopage;
4002
4003 /*
Vlastimil Babka3eb27712016-07-28 15:49:22 -07004004 * Looks like reclaim/compaction is worth trying, but
4005 * sync compaction could be very expensive, so keep
Vlastimil Babka25160352016-07-28 15:49:25 -07004006 * using async compaction.
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004007 */
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07004008 compact_priority = INIT_COMPACT_PRIORITY;
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004009 }
4010 }
Vlastimil Babka23771232016-07-28 15:49:16 -07004011
4012retry:
4013 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4014 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
4015 wake_all_kswapds(order, ac);
4016
Michal Hockocd04ae12017-09-06 16:24:50 -07004017 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4018 if (reserve_flags)
4019 alloc_flags = reserve_flags;
Vlastimil Babka23771232016-07-28 15:49:16 -07004020
4021 /*
Mel Gormane46e7b72016-06-03 14:56:01 -07004022 * Reset the zonelist iterators if memory policies can be ignored.
4023 * These allocations are high priority and system rather than user
4024 * oriented.
4025 */
Michal Hockocd04ae12017-09-06 16:24:50 -07004026 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
Mel Gormane46e7b72016-06-03 14:56:01 -07004027 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
4028 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4029 ac->high_zoneidx, ac->nodemask);
4030 }
4031
Vlastimil Babka23771232016-07-28 15:49:16 -07004032 /* Attempt with potentially adjusted zonelist and alloc_flags */
Vlastimil Babka31a6c192016-07-28 15:49:13 -07004033 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08004034 if (page)
4035 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036
Mel Gormand0164ad2015-11-06 16:28:21 -08004037 /* Caller is not willing to reclaim, we can't balance anything */
Michal Hocko9a67f642017-02-22 15:46:19 -08004038 if (!can_direct_reclaim)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004039 goto nopage;
Michal Hocko9a67f642017-02-22 15:46:19 -08004040
4041 /* Make sure we know about allocations which stall for too long */
4042 if (time_after(jiffies, alloc_start + stall_timeout)) {
Johannes Weiner82251962017-05-03 14:53:48 -07004043 warn_alloc(gfp_mask & ~__GFP_NOWARN, ac->nodemask,
Michal Hocko9a67f642017-02-22 15:46:19 -08004044 "page allocation stalls for %ums, order:%u",
4045 jiffies_to_msecs(jiffies-alloc_start), order);
4046 stall_timeout += 10 * HZ;
David Rientjesaed0a0e2014-01-21 15:51:12 -08004047 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004048
Peter Zijlstra341ce062009-06-16 15:32:02 -07004049 /* Avoid recursion of direct reclaim */
Michal Hocko9a67f642017-02-22 15:46:19 -08004050 if (current->flags & PF_MEMALLOC)
Peter Zijlstra341ce062009-06-16 15:32:02 -07004051 goto nopage;
David Rientjes8fe78042014-08-06 16:07:54 -07004052
Mel Gorman11e33f62009-06-16 15:31:57 -07004053 /* Try direct reclaim and then allocating */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08004054 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4055 &did_some_progress);
Mel Gorman11e33f62009-06-16 15:31:57 -07004056 if (page)
4057 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004059 /* Try direct compaction and then allocating */
4060 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07004061 compact_priority, &compact_result);
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004062 if (page)
4063 goto got_pg;
4064
Johannes Weiner90839052015-06-24 16:57:21 -07004065 /* Do not loop if specifically requested */
4066 if (gfp_mask & __GFP_NORETRY)
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004067 goto nopage;
Johannes Weiner90839052015-06-24 16:57:21 -07004068
Michal Hocko0a0337e2016-05-20 16:57:00 -07004069 /*
4070 * Do not retry costly high order allocations unless they are
Michal Hockodcda9b02017-07-12 14:36:45 -07004071 * __GFP_RETRY_MAYFAIL
Michal Hocko0a0337e2016-05-20 16:57:00 -07004072 */
Michal Hockodcda9b02017-07-12 14:36:45 -07004073 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07004074 goto nopage;
Michal Hocko0a0337e2016-05-20 16:57:00 -07004075
Michal Hocko0a0337e2016-05-20 16:57:00 -07004076 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
Vlastimil Babka423b4522016-10-07 17:00:40 -07004077 did_some_progress > 0, &no_progress_loops))
Michal Hocko0a0337e2016-05-20 16:57:00 -07004078 goto retry;
4079
Michal Hocko33c2d212016-05-20 16:57:06 -07004080 /*
4081 * It doesn't make any sense to retry the compaction if the order-0
4082 * reclaim is not able to make any progress because the current
4083 * implementation of compaction depends on a sufficient amount
4084 * of free memory (see __compaction_suitable)
4085 */
4086 if (did_some_progress > 0 &&
Michal Hocko86a294a2016-05-20 16:57:12 -07004087 should_compact_retry(ac, order, alloc_flags,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07004088 compact_result, &compact_priority,
Vlastimil Babkad9436492016-10-07 17:00:31 -07004089 &compaction_retries))
Michal Hocko33c2d212016-05-20 16:57:06 -07004090 goto retry;
4091
Vlastimil Babka902b6282017-07-06 15:39:56 -07004092
4093 /* Deal with possible cpuset update races before we start OOM killing */
4094 if (check_retry_cpuset(cpuset_mems_cookie, ac))
Vlastimil Babkae47483b2017-01-24 15:18:41 -08004095 goto retry_cpuset;
4096
Johannes Weiner90839052015-06-24 16:57:21 -07004097 /* Reclaim has failed us, start killing things */
4098 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4099 if (page)
4100 goto got_pg;
4101
Michal Hocko9a67f642017-02-22 15:46:19 -08004102 /* Avoid allocations with no watermarks from looping endlessly */
Michal Hockocd04ae12017-09-06 16:24:50 -07004103 if (tsk_is_oom_victim(current) &&
4104 (alloc_flags == ALLOC_OOM ||
Tetsuo Handac2889832017-06-02 14:46:31 -07004105 (gfp_mask & __GFP_NOMEMALLOC)))
Michal Hocko9a67f642017-02-22 15:46:19 -08004106 goto nopage;
4107
Johannes Weiner90839052015-06-24 16:57:21 -07004108 /* Retry as long as the OOM killer is making progress */
Michal Hocko0a0337e2016-05-20 16:57:00 -07004109 if (did_some_progress) {
4110 no_progress_loops = 0;
Johannes Weiner90839052015-06-24 16:57:21 -07004111 goto retry;
Michal Hocko0a0337e2016-05-20 16:57:00 -07004112 }
Johannes Weiner90839052015-06-24 16:57:21 -07004113
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114nopage:
Vlastimil Babka902b6282017-07-06 15:39:56 -07004115 /* Deal with possible cpuset update races before we fail */
4116 if (check_retry_cpuset(cpuset_mems_cookie, ac))
Vlastimil Babka5ce9bfe2017-01-24 15:18:38 -08004117 goto retry_cpuset;
4118
Michal Hocko9a67f642017-02-22 15:46:19 -08004119 /*
4120 * Make sure that a __GFP_NOFAIL request doesn't leak out and make sure
4121 * we always retry
4122 */
4123 if (gfp_mask & __GFP_NOFAIL) {
4124 /*
4125 * All existing users of __GFP_NOFAIL are blockable, so warn
4126 * of any new users that actually require GFP_NOWAIT
4127 */
4128 if (WARN_ON_ONCE(!can_direct_reclaim))
4129 goto fail;
4130
4131 /*
4132 * A PF_MEMALLOC request from this context is rather bizarre
4133 * because we cannot reclaim anything and can only loop waiting
4134 * for somebody to do the work for us
4135 */
4136 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4137
4138 /*
4139 * Non-failing costly orders are a hard requirement which we
4140 * are not well prepared for, so let's warn about these users
4141 * so that we can identify them and convert them to something
4142 * else.
4143 */
4144 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
4145
Michal Hocko6c18ba72017-02-22 15:46:25 -08004146 /*
4147 * Help non-failing allocations by giving them access to memory
4148 * reserves but do not use ALLOC_NO_WATERMARKS because this
4149 * could deplete the whole memory reserves which would just make
4150 * the situation worse
4151 */
4152 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4153 if (page)
4154 goto got_pg;
4155
Michal Hocko9a67f642017-02-22 15:46:19 -08004156 cond_resched();
4157 goto retry;
4158 }
4159fail:
Michal Hockoa8e99252017-02-22 15:46:10 -08004160 warn_alloc(gfp_mask, ac->nodemask,
Michal Hocko7877cdc2016-10-07 17:01:55 -07004161 "page allocation failure: order:%u", order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004162got_pg:
Mel Gorman072bb0a2012-07-31 16:43:58 -07004163 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004164}
Mel Gorman11e33f62009-06-16 15:31:57 -07004165
Mel Gorman9cd75552017-02-24 14:56:29 -08004166static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
Vlastimil Babka04ec6262017-07-06 15:40:03 -07004167 int preferred_nid, nodemask_t *nodemask,
Mel Gorman9cd75552017-02-24 14:56:29 -08004168 struct alloc_context *ac, gfp_t *alloc_mask,
4169 unsigned int *alloc_flags)
4170{
4171 ac->high_zoneidx = gfp_zone(gfp_mask);
Vlastimil Babka04ec6262017-07-06 15:40:03 -07004172 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
Mel Gorman9cd75552017-02-24 14:56:29 -08004173 ac->nodemask = nodemask;
4174 ac->migratetype = gfpflags_to_migratetype(gfp_mask);
4175
4176 if (cpusets_enabled()) {
4177 *alloc_mask |= __GFP_HARDWALL;
Mel Gorman9cd75552017-02-24 14:56:29 -08004178 if (!ac->nodemask)
4179 ac->nodemask = &cpuset_current_mems_allowed;
Vlastimil Babka51047822017-02-24 14:56:53 -08004180 else
4181 *alloc_flags |= ALLOC_CPUSET;
Mel Gorman9cd75552017-02-24 14:56:29 -08004182 }
4183
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004184 fs_reclaim_acquire(gfp_mask);
4185 fs_reclaim_release(gfp_mask);
Mel Gorman9cd75552017-02-24 14:56:29 -08004186
4187 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
4188
4189 if (should_fail_alloc_page(gfp_mask, order))
4190 return false;
4191
Mel Gorman9cd75552017-02-24 14:56:29 -08004192 if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
4193 *alloc_flags |= ALLOC_CMA;
4194
4195 return true;
4196}
4197
4198/* Determine whether to spread dirty pages and what the first usable zone is */
4199static inline void finalise_ac(gfp_t gfp_mask,
4200 unsigned int order, struct alloc_context *ac)
4201{
4202 /* Dirty zone balancing only done in the fast path */
4203 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4204
4205 /*
4206 * The preferred zone is used for statistics but crucially it is
4207 * also used as the starting point for the zonelist iterator. It
4208 * may get reset for allocations that ignore memory policies.
4209 */
4210 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4211 ac->high_zoneidx, ac->nodemask);
4212}
4213
Mel Gorman11e33f62009-06-16 15:31:57 -07004214/*
4215 * This is the 'heart' of the zoned buddy allocator.
4216 */
4217struct page *
Vlastimil Babka04ec6262017-07-06 15:40:03 -07004218__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
4219 nodemask_t *nodemask)
Mel Gorman11e33f62009-06-16 15:31:57 -07004220{
Mel Gorman5bb1b162016-05-19 17:13:50 -07004221 struct page *page;
Mel Gormane6cbd7f2016-07-28 15:46:50 -07004222 unsigned int alloc_flags = ALLOC_WMARK_LOW;
Tetsuo Handaf19360f2017-09-08 16:13:22 -07004223 gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
Mel Gorman9cd75552017-02-24 14:56:29 -08004224 struct alloc_context ac = { };
Mel Gorman682a3382016-05-19 17:13:30 -07004225
Benjamin Herrenschmidtdcce2842009-06-18 13:24:12 +10004226 gfp_mask &= gfp_allowed_mask;
Tetsuo Handaf19360f2017-09-08 16:13:22 -07004227 alloc_mask = gfp_mask;
Vlastimil Babka04ec6262017-07-06 15:40:03 -07004228 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
Mel Gorman11e33f62009-06-16 15:31:57 -07004229 return NULL;
4230
Mel Gorman9cd75552017-02-24 14:56:29 -08004231 finalise_ac(gfp_mask, order, &ac);
Mel Gorman5bb1b162016-05-19 17:13:50 -07004232
Mel Gorman5117f452009-06-16 15:31:59 -07004233 /* First allocation attempt */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08004234 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
Mel Gorman4fcb0972016-05-19 17:14:01 -07004235 if (likely(page))
4236 goto out;
Andrew Morton91fbdc02015-02-11 15:25:04 -08004237
Mel Gorman4fcb0972016-05-19 17:14:01 -07004238 /*
Michal Hocko7dea19f2017-05-03 14:53:15 -07004239 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4240 * and GFP_NOIO, which have to be inherited for all allocation requests
4241 * from a particular context which has been marked by
4242 * memalloc_no{fs,io}_{save,restore}.
Mel Gorman4fcb0972016-05-19 17:14:01 -07004243 */
Michal Hocko7dea19f2017-05-03 14:53:15 -07004244 alloc_mask = current_gfp_context(gfp_mask);
Mel Gorman4fcb0972016-05-19 17:14:01 -07004245 ac.spread_dirty_pages = false;
Mel Gorman11e33f62009-06-16 15:31:57 -07004246
Mel Gorman47415262016-05-19 17:14:44 -07004247 /*
4248 * Restore the original nodemask if it was potentially replaced with
4249 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4250 */
Vlastimil Babkae47483b2017-01-24 15:18:41 -08004251 if (unlikely(ac.nodemask != nodemask))
Mel Gorman47415262016-05-19 17:14:44 -07004252 ac.nodemask = nodemask;
Vlastimil Babka16096c22017-01-24 15:18:35 -08004253
Mel Gorman4fcb0972016-05-19 17:14:01 -07004254 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
Xishi Qiu23f086f2015-02-11 15:25:07 -08004255
Mel Gorman4fcb0972016-05-19 17:14:01 -07004256out:
Vladimir Davydovc4159a72016-08-08 23:03:12 +03004257 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
4258 unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
4259 __free_pages(page, order);
4260 page = NULL;
Vladimir Davydov49491482016-07-26 15:24:24 -07004261 }
4262
Mel Gorman4fcb0972016-05-19 17:14:01 -07004263 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
4264
Mel Gorman11e33f62009-06-16 15:31:57 -07004265 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004266}
Mel Gormand2391712009-06-16 15:31:52 -07004267EXPORT_SYMBOL(__alloc_pages_nodemask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004268
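/*
 * Illustrative sketch, not part of this file: a direct caller of the zoned
 * buddy allocator. Most users go through wrappers such as alloc_pages() /
 * alloc_pages_node(), which funnel into __alloc_pages_nodemask(). The
 * function name below is hypothetical and the block is compiled out.
 */
#if 0	/* example only */
static struct page *example_alloc_on_node(int nid, unsigned int order)
{
	/* NULL nodemask: fall back to the usual zonelist/cpuset policy. */
	return __alloc_pages_nodemask(GFP_KERNEL, order, nid, NULL);
}
#endif
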
4269/*
4270 * Common helper functions.
4271 */
Harvey Harrison920c7a52008-02-04 22:29:26 -08004272unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004273{
Akinobu Mita945a1112009-09-21 17:01:47 -07004274 struct page *page;
4275
4276 /*
4277 * __get_free_pages() returns a 32-bit address, which cannot represent
4278 * a highmem page
4279 */
4280 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
4281
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282 page = alloc_pages(gfp_mask, order);
4283 if (!page)
4284 return 0;
4285 return (unsigned long) page_address(page);
4286}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004287EXPORT_SYMBOL(__get_free_pages);
4288
Harvey Harrison920c7a52008-02-04 22:29:26 -08004289unsigned long get_zeroed_page(gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004290{
Akinobu Mita945a1112009-09-21 17:01:47 -07004291 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004292}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004293EXPORT_SYMBOL(get_zeroed_page);
4294
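/*
 * Illustrative sketch, not part of this file: the usual pairing of
 * __get_free_pages() with free_pages() for a small physically contiguous
 * kernel buffer. The function name and order are just examples and the
 * block is compiled out.
 */
#if 0	/* example only */
static int example_use_free_pages(void)
{
	unsigned long buf = __get_free_pages(GFP_KERNEL, 1);	/* 2 pages */

	if (!buf)
		return -ENOMEM;
	/* ... use the 2 * PAGE_SIZE buffer at (void *)buf ... */
	free_pages(buf, 1);
	return 0;
}
#endif
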
Harvey Harrison920c7a52008-02-04 22:29:26 -08004295void __free_pages(struct page *page, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004296{
Nick Pigginb5810032005-10-29 18:16:12 -07004297 if (put_page_testzero(page)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298 if (order == 0)
Mel Gorman2d4894b2017-11-15 17:37:59 -08004299 free_unref_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004300 else
4301 __free_pages_ok(page, order);
4302 }
4303}
4304
4305EXPORT_SYMBOL(__free_pages);
4306
Harvey Harrison920c7a52008-02-04 22:29:26 -08004307void free_pages(unsigned long addr, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004308{
4309 if (addr != 0) {
Nick Piggin725d7042006-09-25 23:30:55 -07004310 VM_BUG_ON(!virt_addr_valid((void *)addr));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311 __free_pages(virt_to_page((void *)addr), order);
4312 }
4313}
4314
4315EXPORT_SYMBOL(free_pages);
4316
Glauber Costa6a1a0d32012-12-18 14:22:00 -08004317/*
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004318 * Page Fragment:
4319 * An arbitrary-length arbitrary-offset area of memory which resides
4320 * within a 0 or higher order page. Multiple fragments within that page
4321 * are individually refcounted, in the page's reference counter.
4322 *
4323 * The page_frag functions below provide a simple allocation framework for
4324 * page fragments. This is used by the network stack and network device
4325 * drivers to provide a backing region of memory for use as either an
4326 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4327 */
Alexander Duyck2976db82017-01-10 16:58:09 -08004328static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4329 gfp_t gfp_mask)
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004330{
4331 struct page *page = NULL;
4332 gfp_t gfp = gfp_mask;
4333
4334#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4335 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4336 __GFP_NOMEMALLOC;
4337 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4338 PAGE_FRAG_CACHE_MAX_ORDER);
4339 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4340#endif
4341 if (unlikely(!page))
4342 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4343
4344 nc->va = page ? page_address(page) : NULL;
4345
4346 return page;
4347}
4348
Alexander Duyck2976db82017-01-10 16:58:09 -08004349void __page_frag_cache_drain(struct page *page, unsigned int count)
Alexander Duyck44fdffd2016-12-14 15:05:26 -08004350{
4351 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4352
4353 if (page_ref_sub_and_test(page, count)) {
Alexander Duyck2976db82017-01-10 16:58:09 -08004354 unsigned int order = compound_order(page);
4355
Alexander Duyck44fdffd2016-12-14 15:05:26 -08004356 if (order == 0)
Mel Gorman2d4894b2017-11-15 17:37:59 -08004357 free_unref_page(page);
Alexander Duyck44fdffd2016-12-14 15:05:26 -08004358 else
4359 __free_pages_ok(page, order);
4360 }
4361}
Alexander Duyck2976db82017-01-10 16:58:09 -08004362EXPORT_SYMBOL(__page_frag_cache_drain);
Alexander Duyck44fdffd2016-12-14 15:05:26 -08004363
Alexander Duyck8c2dd3e2017-01-10 16:58:06 -08004364void *page_frag_alloc(struct page_frag_cache *nc,
4365 unsigned int fragsz, gfp_t gfp_mask)
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004366{
4367 unsigned int size = PAGE_SIZE;
4368 struct page *page;
4369 int offset;
4370
4371 if (unlikely(!nc->va)) {
4372refill:
Alexander Duyck2976db82017-01-10 16:58:09 -08004373 page = __page_frag_cache_refill(nc, gfp_mask);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004374 if (!page)
4375 return NULL;
4376
4377#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4378 /* if size can vary use size else just use PAGE_SIZE */
4379 size = nc->size;
4380#endif
4381 /* Even if we own the page, we do not use atomic_set().
4382 * This would break get_page_unless_zero() users.
4383 */
Joonsoo Kimfe896d12016-03-17 14:19:26 -07004384 page_ref_add(page, size - 1);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004385
4386 /* reset page count bias and offset to start of new frag */
Michal Hocko2f064f32015-08-21 14:11:51 -07004387 nc->pfmemalloc = page_is_pfmemalloc(page);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004388 nc->pagecnt_bias = size;
4389 nc->offset = size;
4390 }
4391
4392 offset = nc->offset - fragsz;
4393 if (unlikely(offset < 0)) {
4394 page = virt_to_page(nc->va);
4395
Joonsoo Kimfe896d12016-03-17 14:19:26 -07004396 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004397 goto refill;
4398
4399#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4400 /* if size can vary use size else just use PAGE_SIZE */
4401 size = nc->size;
4402#endif
4403 /* OK, page count is 0, we can safely set it */
Joonsoo Kimfe896d12016-03-17 14:19:26 -07004404 set_page_count(page, size);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004405
4406 /* reset page count bias and offset to start of new frag */
4407 nc->pagecnt_bias = size;
4408 offset = size - fragsz;
4409 }
4410
4411 nc->pagecnt_bias--;
4412 nc->offset = offset;
4413
4414 return nc->va + offset;
4415}
Alexander Duyck8c2dd3e2017-01-10 16:58:06 -08004416EXPORT_SYMBOL(page_frag_alloc);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004417
4418/*
4419 * Frees a page fragment allocated out of either a compound or order 0 page.
4420 */
Alexander Duyck8c2dd3e2017-01-10 16:58:06 -08004421void page_frag_free(void *addr)
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004422{
4423 struct page *page = virt_to_head_page(addr);
4424
4425 if (unlikely(put_page_testzero(page)))
4426 __free_pages_ok(page, compound_order(page));
4427}
Alexander Duyck8c2dd3e2017-01-10 16:58:06 -08004428EXPORT_SYMBOL(page_frag_free);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004429
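/*
 * Illustrative sketch, not part of this file: carving two fragments out of
 * a page_frag_cache and releasing them with page_frag_free(). Real users
 * (e.g. networking receive paths) typically keep the cache per cpu or per
 * device; the static cache and function below are hypothetical and the
 * block is compiled out.
 */
#if 0	/* example only */
static struct page_frag_cache example_frag_cache;

static void example_use_page_frags(void)
{
	void *a = page_frag_alloc(&example_frag_cache, 256, GFP_ATOMIC);
	void *b = page_frag_alloc(&example_frag_cache, 512, GFP_ATOMIC);

	/* Both fragments share the refcount of their backing page. */
	if (b)
		page_frag_free(b);
	if (a)
		page_frag_free(a);
}
#endif
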
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08004430static void *make_alloc_exact(unsigned long addr, unsigned int order,
4431 size_t size)
Andi Kleenee85c2e2011-05-11 15:13:34 -07004432{
4433 if (addr) {
4434 unsigned long alloc_end = addr + (PAGE_SIZE << order);
4435 unsigned long used = addr + PAGE_ALIGN(size);
4436
4437 split_page(virt_to_page((void *)addr), order);
4438 while (used < alloc_end) {
4439 free_page(used);
4440 used += PAGE_SIZE;
4441 }
4442 }
4443 return (void *)addr;
4444}
4445
Timur Tabi2be0ffe2008-07-23 21:28:11 -07004446/**
4447 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
4448 * @size: the number of bytes to allocate
4449 * @gfp_mask: GFP flags for the allocation
4450 *
4451 * This function is similar to alloc_pages(), except that it allocates the
4452 * minimum number of pages to satisfy the request. alloc_pages() can only
4453 * allocate memory in power-of-two pages.
4454 *
4455 * This function is also limited by MAX_ORDER.
4456 *
4457 * Memory allocated by this function must be released by free_pages_exact().
4458 */
4459void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4460{
4461 unsigned int order = get_order(size);
4462 unsigned long addr;
4463
4464 addr = __get_free_pages(gfp_mask, order);
Andi Kleenee85c2e2011-05-11 15:13:34 -07004465 return make_alloc_exact(addr, order, size);
Timur Tabi2be0ffe2008-07-23 21:28:11 -07004466}
4467EXPORT_SYMBOL(alloc_pages_exact);
4468
4469/**
Andi Kleenee85c2e2011-05-11 15:13:34 -07004470 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4471 * pages on a node.
Randy Dunlapb5e6ab52011-05-16 13:16:54 -07004472 * @nid: the preferred node ID where memory should be allocated
Andi Kleenee85c2e2011-05-11 15:13:34 -07004473 * @size: the number of bytes to allocate
4474 * @gfp_mask: GFP flags for the allocation
4475 *
4476 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
4477 * back.
Andi Kleenee85c2e2011-05-11 15:13:34 -07004478 */
Fabian Fredericke1931812014-08-06 16:04:59 -07004479void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
Andi Kleenee85c2e2011-05-11 15:13:34 -07004480{
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08004481 unsigned int order = get_order(size);
Andi Kleenee85c2e2011-05-11 15:13:34 -07004482 struct page *p = alloc_pages_node(nid, gfp_mask, order);
4483 if (!p)
4484 return NULL;
4485 return make_alloc_exact((unsigned long)page_address(p), order, size);
4486}
Andi Kleenee85c2e2011-05-11 15:13:34 -07004487
4488/**
Timur Tabi2be0ffe2008-07-23 21:28:11 -07004489 * free_pages_exact - release memory allocated via alloc_pages_exact()
4490 * @virt: the value returned by alloc_pages_exact.
4491 * @size: size of allocation, same value as passed to alloc_pages_exact().
4492 *
4493 * Release the memory allocated by a previous call to alloc_pages_exact.
4494 */
4495void free_pages_exact(void *virt, size_t size)
4496{
4497 unsigned long addr = (unsigned long)virt;
4498 unsigned long end = addr + PAGE_ALIGN(size);
4499
4500 while (addr < end) {
4501 free_page(addr);
4502 addr += PAGE_SIZE;
4503 }
4504}
4505EXPORT_SYMBOL(free_pages_exact);
4506
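/*
 * Illustrative sketch, not part of this file: alloc_pages_exact() for a
 * buffer that is not a power-of-two number of pages, paired with
 * free_pages_exact(). The function name and size are examples only and
 * the block is compiled out.
 */
#if 0	/* example only */
static int example_alloc_exact(void)
{
	/* 3 pages; alloc_pages() would have to round up to order 2 (4 pages). */
	void *buf = alloc_pages_exact(3 * PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... use the buffer ... */
	free_pages_exact(buf, 3 * PAGE_SIZE);
	return 0;
}
#endif
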
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004507/**
4508 * nr_free_zone_pages - count number of pages beyond high watermark
4509 * @offset: The zone index of the highest zone
4510 *
4511 * nr_free_zone_pages() counts the number of pages which are beyond the
4512 * high watermark within all zones at or below a given zone index. For each
4513 * zone, the number of pages is calculated as:
mchehab@s-opensource.com0e056eb2017-03-30 17:11:36 -03004514 *
4515 * nr_free_zone_pages = managed_pages - high_pages
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004516 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004517static unsigned long nr_free_zone_pages(int offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004518{
Mel Gormandd1a2392008-04-28 02:12:17 -07004519 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07004520 struct zone *zone;
4521
Martin J. Blighe310fd42005-07-29 22:59:18 -07004522 /* Just pick one node, since fallback list is circular */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004523 unsigned long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004524
Mel Gorman0e884602008-04-28 02:12:14 -07004525 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004526
Mel Gorman54a6eb52008-04-28 02:12:16 -07004527 for_each_zone_zonelist(zone, z, zonelist, offset) {
Jiang Liub40da042013-02-22 16:33:52 -08004528 unsigned long size = zone->managed_pages;
Mel Gorman41858962009-06-16 15:32:12 -07004529 unsigned long high = high_wmark_pages(zone);
Martin J. Blighe310fd42005-07-29 22:59:18 -07004530 if (size > high)
4531 sum += size - high;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004532 }
4533
4534 return sum;
4535}
4536
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004537/**
4538 * nr_free_buffer_pages - count number of pages beyond high watermark
4539 *
4540 * nr_free_buffer_pages() counts the number of pages which are beyond the high
4541 * watermark within ZONE_DMA and ZONE_NORMAL.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004542 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004543unsigned long nr_free_buffer_pages(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004544{
Al Viroaf4ca452005-10-21 02:55:38 -04004545 return nr_free_zone_pages(gfp_zone(GFP_USER));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004546}
Meelap Shahc2f1a552007-07-17 04:04:39 -07004547EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004548
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004549/**
4550 * nr_free_pagecache_pages - count number of pages beyond high watermark
4551 *
4552 * nr_free_pagecache_pages() counts the number of pages which are beyond the
4553 * high watermark within all zones.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004554 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004555unsigned long nr_free_pagecache_pages(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004556{
Mel Gorman2a1e2742007-07-17 04:03:12 -07004557 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004558}
Christoph Lameter08e0f6a2006-09-27 01:50:06 -07004559
4560static inline void show_node(struct zone *zone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004561{
Kirill A. Shutemove5adfff2012-12-11 16:00:29 -08004562 if (IS_ENABLED(CONFIG_NUMA))
Andy Whitcroft25ba77c2006-12-06 20:33:03 -08004563 printk("Node %d ", zone_to_nid(zone));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004565
Igor Redkod02bd272016-03-17 14:19:05 -07004566long si_mem_available(void)
4567{
4568 long available;
4569 unsigned long pagecache;
4570 unsigned long wmark_low = 0;
4571 unsigned long pages[NR_LRU_LISTS];
4572 struct zone *zone;
4573 int lru;
4574
4575 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
Mel Gorman2f95ff92016-08-11 15:32:57 -07004576 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
Igor Redkod02bd272016-03-17 14:19:05 -07004577
4578 for_each_zone(zone)
4579 wmark_low += zone->watermark[WMARK_LOW];
4580
4581 /*
4582 * Estimate the amount of memory available for userspace allocations,
4583 * without causing swapping.
4584 */
Michal Hockoc41f0122017-09-06 16:23:36 -07004585 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
Igor Redkod02bd272016-03-17 14:19:05 -07004586
4587 /*
4588 * Not all the page cache can be freed, otherwise the system will
4589 * start swapping. Assume at least half of the page cache, or the
4590 * low watermark worth of cache, needs to stay.
4591 */
4592 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
4593 pagecache -= min(pagecache / 2, wmark_low);
4594 available += pagecache;
4595
4596 /*
4597 * Part of the reclaimable slab consists of items that are in use,
4598 * and cannot be freed. Cap this estimate at the low watermark.
4599 */
Johannes Weinerd507e2eb2017-08-10 15:23:31 -07004600 available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
4601 min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
4602 wmark_low);
Igor Redkod02bd272016-03-17 14:19:05 -07004603
4604 if (available < 0)
4605 available = 0;
4606 return available;
4607}
4608EXPORT_SYMBOL_GPL(si_mem_available);
4609
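/*
 * Illustrative sketch, not part of this file: MemAvailable in
 * /proc/meminfo is derived from si_mem_available(); a hypothetical
 * in-kernel user could apply the same estimate before starting a
 * memory-hungry operation. The block is compiled out.
 */
#if 0	/* example only */
static bool example_enough_memory_for(unsigned long nr_pages)
{
	return si_mem_available() >= (long)nr_pages;
}
#endif
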
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610void si_meminfo(struct sysinfo *val)
4611{
4612 val->totalram = totalram_pages;
Mel Gorman11fb9982016-07-28 15:46:20 -07004613 val->sharedram = global_node_page_state(NR_SHMEM);
Michal Hockoc41f0122017-09-06 16:23:36 -07004614 val->freeram = global_zone_page_state(NR_FREE_PAGES);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615 val->bufferram = nr_blockdev_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004616 val->totalhigh = totalhigh_pages;
4617 val->freehigh = nr_free_highpages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004618 val->mem_unit = PAGE_SIZE;
4619}
4620
4621EXPORT_SYMBOL(si_meminfo);
4622
4623#ifdef CONFIG_NUMA
4624void si_meminfo_node(struct sysinfo *val, int nid)
4625{
Jiang Liucdd91a72013-07-03 15:03:27 -07004626 int zone_type; /* needs to be signed */
4627 unsigned long managed_pages = 0;
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07004628 unsigned long managed_highpages = 0;
4629 unsigned long free_highpages = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004630 pg_data_t *pgdat = NODE_DATA(nid);
4631
Jiang Liucdd91a72013-07-03 15:03:27 -07004632 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
4633 managed_pages += pgdat->node_zones[zone_type].managed_pages;
4634 val->totalram = managed_pages;
Mel Gorman11fb9982016-07-28 15:46:20 -07004635 val->sharedram = node_page_state(pgdat, NR_SHMEM);
Mel Gorman75ef7182016-07-28 15:45:24 -07004636 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07004637#ifdef CONFIG_HIGHMEM
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07004638 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
4639 struct zone *zone = &pgdat->node_zones[zone_type];
4640
4641 if (is_highmem(zone)) {
4642 managed_highpages += zone->managed_pages;
4643 free_highpages += zone_page_state(zone, NR_FREE_PAGES);
4644 }
4645 }
4646 val->totalhigh = managed_highpages;
4647 val->freehigh = free_highpages;
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07004648#else
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07004649 val->totalhigh = managed_highpages;
4650 val->freehigh = free_highpages;
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07004651#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004652 val->mem_unit = PAGE_SIZE;
4653}
4654#endif
4655
David Rientjesddd588b2011-03-22 16:30:46 -07004656/*
David Rientjes7bf02ea2011-05-24 17:11:16 -07004657 * Determine whether the node should be displayed or not, depending on whether
4658 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
David Rientjesddd588b2011-03-22 16:30:46 -07004659 */
Michal Hocko9af744d2017-02-22 15:46:16 -08004660static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
David Rientjesddd588b2011-03-22 16:30:46 -07004661{
David Rientjesddd588b2011-03-22 16:30:46 -07004662 if (!(flags & SHOW_MEM_FILTER_NODES))
Michal Hocko9af744d2017-02-22 15:46:16 -08004663 return false;
David Rientjesddd588b2011-03-22 16:30:46 -07004664
Michal Hocko9af744d2017-02-22 15:46:16 -08004665 /*
4666 * no node mask - aka implicit memory numa policy. Do not bother with
4667 * the synchronization - read_mems_allowed_begin - because we do not
4668 * have to be precise here.
4669 */
4670 if (!nodemask)
4671 nodemask = &cpuset_current_mems_allowed;
4672
4673 return !node_isset(nid, *nodemask);
David Rientjesddd588b2011-03-22 16:30:46 -07004674}
4675
Linus Torvalds1da177e2005-04-16 15:20:36 -07004676#define K(x) ((x) << (PAGE_SHIFT-10))
4677
Rabin Vincent377e4f12012-12-11 16:00:24 -08004678static void show_migration_types(unsigned char type)
4679{
4680 static const char types[MIGRATE_TYPES] = {
4681 [MIGRATE_UNMOVABLE] = 'U',
Rabin Vincent377e4f12012-12-11 16:00:24 -08004682 [MIGRATE_MOVABLE] = 'M',
Vlastimil Babka475a2f92015-12-11 13:40:29 -08004683 [MIGRATE_RECLAIMABLE] = 'E',
4684 [MIGRATE_HIGHATOMIC] = 'H',
Rabin Vincent377e4f12012-12-11 16:00:24 -08004685#ifdef CONFIG_CMA
4686 [MIGRATE_CMA] = 'C',
4687#endif
Minchan Kim194159f2013-02-22 16:33:58 -08004688#ifdef CONFIG_MEMORY_ISOLATION
Rabin Vincent377e4f12012-12-11 16:00:24 -08004689 [MIGRATE_ISOLATE] = 'I',
Minchan Kim194159f2013-02-22 16:33:58 -08004690#endif
Rabin Vincent377e4f12012-12-11 16:00:24 -08004691 };
4692 char tmp[MIGRATE_TYPES + 1];
4693 char *p = tmp;
4694 int i;
4695
4696 for (i = 0; i < MIGRATE_TYPES; i++) {
4697 if (type & (1 << i))
4698 *p++ = types[i];
4699 }
4700
4701 *p = '\0';
Joe Perches1f84a182016-10-27 17:46:29 -07004702 printk(KERN_CONT "(%s) ", tmp);
Rabin Vincent377e4f12012-12-11 16:00:24 -08004703}
4704
Linus Torvalds1da177e2005-04-16 15:20:36 -07004705/*
4706 * Show free area list (used inside shift_scroll-lock stuff)
4707 * We also calculate the percentage fragmentation. We do this by counting the
4708 * memory on each free list with the exception of the first item on the list.
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004709 *
4710 * Bits in @filter:
4711 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
4712 * cpuset.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004713 */
Michal Hocko9af744d2017-02-22 15:46:16 -08004714void show_free_areas(unsigned int filter, nodemask_t *nodemask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004715{
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004716 unsigned long free_pcp = 0;
Jes Sorensenc7241912006-09-27 01:50:05 -07004717 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004718 struct zone *zone;
Mel Gorman599d0c92016-07-28 15:45:31 -07004719 pg_data_t *pgdat;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004720
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07004721 for_each_populated_zone(zone) {
Michal Hocko9af744d2017-02-22 15:46:16 -08004722 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
David Rientjesddd588b2011-03-22 16:30:46 -07004723 continue;
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004724
Konstantin Khlebnikov761b0672015-04-14 15:45:32 -07004725 for_each_online_cpu(cpu)
4726 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004727 }
4728
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07004729 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
4730 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004731 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
4732 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07004733 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004734 " free:%lu free_pcp:%lu free_cma:%lu\n",
Mel Gorman599d0c92016-07-28 15:45:31 -07004735 global_node_page_state(NR_ACTIVE_ANON),
4736 global_node_page_state(NR_INACTIVE_ANON),
4737 global_node_page_state(NR_ISOLATED_ANON),
4738 global_node_page_state(NR_ACTIVE_FILE),
4739 global_node_page_state(NR_INACTIVE_FILE),
4740 global_node_page_state(NR_ISOLATED_FILE),
4741 global_node_page_state(NR_UNEVICTABLE),
Mel Gorman11fb9982016-07-28 15:46:20 -07004742 global_node_page_state(NR_FILE_DIRTY),
4743 global_node_page_state(NR_WRITEBACK),
4744 global_node_page_state(NR_UNSTABLE_NFS),
Johannes Weinerd507e2eb2017-08-10 15:23:31 -07004745 global_node_page_state(NR_SLAB_RECLAIMABLE),
4746 global_node_page_state(NR_SLAB_UNRECLAIMABLE),
Mel Gorman50658e22016-07-28 15:46:14 -07004747 global_node_page_state(NR_FILE_MAPPED),
Mel Gorman11fb9982016-07-28 15:46:20 -07004748 global_node_page_state(NR_SHMEM),
Michal Hockoc41f0122017-09-06 16:23:36 -07004749 global_zone_page_state(NR_PAGETABLE),
4750 global_zone_page_state(NR_BOUNCE),
4751 global_zone_page_state(NR_FREE_PAGES),
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004752 free_pcp,
Michal Hockoc41f0122017-09-06 16:23:36 -07004753 global_zone_page_state(NR_FREE_CMA_PAGES));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754
Mel Gorman599d0c92016-07-28 15:45:31 -07004755 for_each_online_pgdat(pgdat) {
Michal Hocko9af744d2017-02-22 15:46:16 -08004756 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
Michal Hockoc02e50b2017-02-22 15:46:07 -08004757 continue;
4758
Mel Gorman599d0c92016-07-28 15:45:31 -07004759 printk("Node %d"
4760 " active_anon:%lukB"
4761 " inactive_anon:%lukB"
4762 " active_file:%lukB"
4763 " inactive_file:%lukB"
4764 " unevictable:%lukB"
4765 " isolated(anon):%lukB"
4766 " isolated(file):%lukB"
Mel Gorman50658e22016-07-28 15:46:14 -07004767 " mapped:%lukB"
Mel Gorman11fb9982016-07-28 15:46:20 -07004768 " dirty:%lukB"
4769 " writeback:%lukB"
4770 " shmem:%lukB"
4771#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4772 " shmem_thp: %lukB"
4773 " shmem_pmdmapped: %lukB"
4774 " anon_thp: %lukB"
4775#endif
4776 " writeback_tmp:%lukB"
4777 " unstable:%lukB"
Mel Gorman599d0c92016-07-28 15:45:31 -07004778 " all_unreclaimable? %s"
4779 "\n",
4780 pgdat->node_id,
4781 K(node_page_state(pgdat, NR_ACTIVE_ANON)),
4782 K(node_page_state(pgdat, NR_INACTIVE_ANON)),
4783 K(node_page_state(pgdat, NR_ACTIVE_FILE)),
4784 K(node_page_state(pgdat, NR_INACTIVE_FILE)),
4785 K(node_page_state(pgdat, NR_UNEVICTABLE)),
4786 K(node_page_state(pgdat, NR_ISOLATED_ANON)),
4787 K(node_page_state(pgdat, NR_ISOLATED_FILE)),
Mel Gorman50658e22016-07-28 15:46:14 -07004788 K(node_page_state(pgdat, NR_FILE_MAPPED)),
Mel Gorman11fb9982016-07-28 15:46:20 -07004789 K(node_page_state(pgdat, NR_FILE_DIRTY)),
4790 K(node_page_state(pgdat, NR_WRITEBACK)),
Alexander Polakov1f06b812017-04-07 16:04:45 -07004791 K(node_page_state(pgdat, NR_SHMEM)),
Mel Gorman11fb9982016-07-28 15:46:20 -07004792#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4793 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
4794 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
4795 * HPAGE_PMD_NR),
4796 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
4797#endif
Mel Gorman11fb9982016-07-28 15:46:20 -07004798 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
4799 K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
Johannes Weinerc73322d2017-05-03 14:51:51 -07004800 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
4801 "yes" : "no");
Mel Gorman599d0c92016-07-28 15:45:31 -07004802 }
4803
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07004804 for_each_populated_zone(zone) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004805 int i;
4806
Michal Hocko9af744d2017-02-22 15:46:16 -08004807 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
David Rientjesddd588b2011-03-22 16:30:46 -07004808 continue;
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004809
4810 free_pcp = 0;
4811 for_each_online_cpu(cpu)
4812 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4813
Linus Torvalds1da177e2005-04-16 15:20:36 -07004814 show_node(zone);
Joe Perches1f84a182016-10-27 17:46:29 -07004815 printk(KERN_CONT
4816 "%s"
Linus Torvalds1da177e2005-04-16 15:20:36 -07004817 " free:%lukB"
4818 " min:%lukB"
4819 " low:%lukB"
4820 " high:%lukB"
Minchan Kim71c799f2016-07-28 15:47:26 -07004821 " active_anon:%lukB"
4822 " inactive_anon:%lukB"
4823 " active_file:%lukB"
4824 " inactive_file:%lukB"
4825 " unevictable:%lukB"
Mel Gorman5a1c84b2016-07-28 15:47:31 -07004826 " writepending:%lukB"
Linus Torvalds1da177e2005-04-16 15:20:36 -07004827 " present:%lukB"
Jiang Liu9feedc92012-12-12 13:52:12 -08004828 " managed:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004829 " mlocked:%lukB"
KOSAKI Motohiroc6a7f572009-09-21 17:01:32 -07004830 " kernel_stack:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004831 " pagetables:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004832 " bounce:%lukB"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004833 " free_pcp:%lukB"
4834 " local_pcp:%ukB"
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07004835 " free_cma:%lukB"
Linus Torvalds1da177e2005-04-16 15:20:36 -07004836 "\n",
4837 zone->name,
Mel Gorman88f5acf2011-01-13 15:45:41 -08004838 K(zone_page_state(zone, NR_FREE_PAGES)),
Mel Gorman41858962009-06-16 15:32:12 -07004839 K(min_wmark_pages(zone)),
4840 K(low_wmark_pages(zone)),
4841 K(high_wmark_pages(zone)),
Minchan Kim71c799f2016-07-28 15:47:26 -07004842 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
4843 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
4844 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
4845 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
4846 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
Mel Gorman5a1c84b2016-07-28 15:47:31 -07004847 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
Linus Torvalds1da177e2005-04-16 15:20:36 -07004848 K(zone->present_pages),
Jiang Liu9feedc92012-12-12 13:52:12 -08004849 K(zone->managed_pages),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004850 K(zone_page_state(zone, NR_MLOCK)),
Andy Lutomirskid30dd8b2016-07-28 15:48:14 -07004851 zone_page_state(zone, NR_KERNEL_STACK_KB),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004852 K(zone_page_state(zone, NR_PAGETABLE)),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004853 K(zone_page_state(zone, NR_BOUNCE)),
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004854 K(free_pcp),
4855 K(this_cpu_read(zone->pageset->pcp.count)),
Minchan Kim33e077b2016-07-28 15:47:14 -07004856 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004857 printk("lowmem_reserve[]:");
4858 for (i = 0; i < MAX_NR_ZONES; i++)
Joe Perches1f84a182016-10-27 17:46:29 -07004859 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
4860 printk(KERN_CONT "\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004861 }
4862
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07004863 for_each_populated_zone(zone) {
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08004864 unsigned int order;
4865 unsigned long nr[MAX_ORDER], flags, total = 0;
Rabin Vincent377e4f12012-12-11 16:00:24 -08004866 unsigned char types[MAX_ORDER];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004867
Michal Hocko9af744d2017-02-22 15:46:16 -08004868 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
David Rientjesddd588b2011-03-22 16:30:46 -07004869 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004870 show_node(zone);
Joe Perches1f84a182016-10-27 17:46:29 -07004871 printk(KERN_CONT "%s: ", zone->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004872
4873 spin_lock_irqsave(&zone->lock, flags);
4874 for (order = 0; order < MAX_ORDER; order++) {
Rabin Vincent377e4f12012-12-11 16:00:24 -08004875 struct free_area *area = &zone->free_area[order];
4876 int type;
4877
4878 nr[order] = area->nr_free;
Kirill Korotaev8f9de512006-06-23 02:03:50 -07004879 total += nr[order] << order;
Rabin Vincent377e4f12012-12-11 16:00:24 -08004880
4881 types[order] = 0;
4882 for (type = 0; type < MIGRATE_TYPES; type++) {
4883 if (!list_empty(&area->free_list[type]))
4884 types[order] |= 1 << type;
4885 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004886 }
4887 spin_unlock_irqrestore(&zone->lock, flags);
Rabin Vincent377e4f12012-12-11 16:00:24 -08004888 for (order = 0; order < MAX_ORDER; order++) {
Joe Perches1f84a182016-10-27 17:46:29 -07004889 printk(KERN_CONT "%lu*%lukB ",
4890 nr[order], K(1UL) << order);
Rabin Vincent377e4f12012-12-11 16:00:24 -08004891 if (nr[order])
4892 show_migration_types(types[order]);
4893 }
Joe Perches1f84a182016-10-27 17:46:29 -07004894 printk(KERN_CONT "= %lukB\n", K(total));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895 }
4896
David Rientjes949f7ec2013-04-29 15:07:48 -07004897 hugetlb_show_meminfo();
4898
Mel Gorman11fb9982016-07-28 15:46:20 -07004899 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
Larry Woodmane6f36022008-02-04 22:29:30 -08004900
Linus Torvalds1da177e2005-04-16 15:20:36 -07004901 show_swap_cache_info();
4902}
4903
Mel Gorman19770b32008-04-28 02:12:18 -07004904static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4905{
4906 zoneref->zone = zone;
4907 zoneref->zone_idx = zone_idx(zone);
4908}
4909
Linus Torvalds1da177e2005-04-16 15:20:36 -07004910/*
4911 * Builds allocation fallback zone lists.
Christoph Lameter1a932052006-01-06 00:11:16 -08004912 *
4913 * Add all populated zones of a node to the zonelist.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004914 */
Michal Hocko9d3be212017-09-06 16:20:30 -07004915static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004916{
Christoph Lameter1a932052006-01-06 00:11:16 -08004917 struct zone *zone;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004918 enum zone_type zone_type = MAX_NR_ZONES;
Michal Hocko9d3be212017-09-06 16:20:30 -07004919 int nr_zones = 0;
Christoph Lameter02a68a52006-01-06 00:11:18 -08004920
4921 do {
Christoph Lameter2f6726e2006-09-25 23:31:18 -07004922 zone_type--;
Christoph Lameter070f8032006-01-06 00:11:19 -08004923 zone = pgdat->node_zones + zone_type;
Mel Gorman6aa303d2016-09-01 16:14:55 -07004924 if (managed_zone(zone)) {
Michal Hocko9d3be212017-09-06 16:20:30 -07004925 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
Christoph Lameter070f8032006-01-06 00:11:19 -08004926 check_highest_zone(zone_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004927 }
Christoph Lameter2f6726e2006-09-25 23:31:18 -07004928 } while (zone_type);
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004929
Christoph Lameter070f8032006-01-06 00:11:19 -08004930 return nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004931}
4932
4933#ifdef CONFIG_NUMA
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004934
4935static int __parse_numa_zonelist_order(char *s)
4936{
Michal Hockoc9bff3e2017-09-06 16:20:13 -07004937 /*
4938	 * We used to support different zonelist modes but they turned
4939	 * out to be just not useful. Let's keep the warning in place
4940	 * if somebody still uses the cmd line parameter so that we do
4941	 * not fail it silently.
4942 */
4943 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
4944 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004945 return -EINVAL;
4946 }
4947 return 0;
4948}
4949
4950static __init int setup_numa_zonelist_order(char *s)
4951{
Volodymyr G. Lukiianykecb256f2011-01-13 15:46:26 -08004952 if (!s)
4953 return 0;
4954
Michal Hockoc9bff3e2017-09-06 16:20:13 -07004955 return __parse_numa_zonelist_order(s);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004956}
4957early_param("numa_zonelist_order", setup_numa_zonelist_order);
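/*
 * Illustrative note (not part of the original source): with the parsing
 * above, booting with "numa_zonelist_order=node" (or any value starting
 * with 'd', 'D', 'n' or 'N') is accepted but changes nothing, since only
 * Node ordering is supported, while e.g. "numa_zonelist_order=zone"
 * triggers the pr_warn() and is rejected with -EINVAL.
 */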
4958
Michal Hockoc9bff3e2017-09-06 16:20:13 -07004959char numa_zonelist_order[] = "Node";
4960
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004961/*
4962 * sysctl handler for numa_zonelist_order
4963 */
Joe Perchescccad5b2014-06-06 14:38:09 -07004964int numa_zonelist_order_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07004965 void __user *buffer, size_t *length,
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004966 loff_t *ppos)
4967{
Michal Hockoc9bff3e2017-09-06 16:20:13 -07004968 char *str;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004969 int ret;
4970
Michal Hockoc9bff3e2017-09-06 16:20:13 -07004971 if (!write)
4972 return proc_dostring(table, write, buffer, length, ppos);
4973 str = memdup_user_nul(buffer, 16);
4974 if (IS_ERR(str))
4975 return PTR_ERR(str);
Chen Gangdacbde02013-07-03 15:02:35 -07004976
Michal Hockoc9bff3e2017-09-06 16:20:13 -07004977 ret = __parse_numa_zonelist_order(str);
4978 kfree(str);
Andi Kleen443c6f12009-12-23 21:00:47 +01004979 return ret;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004980}
4981
4982
Christoph Lameter62bc62a2009-06-16 15:32:15 -07004983#define MAX_NODE_LOAD (nr_online_nodes)
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004984static int node_load[MAX_NUMNODES];
4985
Linus Torvalds1da177e2005-04-16 15:20:36 -07004986/**
Pavel Pisa4dc3b162005-05-01 08:59:25 -07004987 * find_next_best_node - find the next node that should appear in a given node's fallback list
Linus Torvalds1da177e2005-04-16 15:20:36 -07004988 * @node: node whose fallback list we're appending
4989 * @used_node_mask: nodemask_t of already used nodes
4990 *
4991 * We use a number of factors to determine which is the next node that should
4992 * appear on a given node's fallback list. The node should not have appeared
4993 * already in @node's fallback list, and it should be the next closest node
4994 * according to the distance array (which contains arbitrary distance values
4995 * from each node to each node in the system), and should also prefer nodes
4996 * with no CPUs, since presumably they'll have very little allocation pressure
4997 * on them otherwise.
4998 * It returns -1 if no node is found.
4999 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005000static int find_next_best_node(int node, nodemask_t *used_node_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005001{
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01005002 int n, val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003 int min_val = INT_MAX;
David Rientjes00ef2d22013-02-22 16:35:36 -08005004 int best_node = NUMA_NO_NODE;
Rusty Russella70f7302009-03-13 14:49:46 +10305005 const struct cpumask *tmp = cpumask_of_node(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005006
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01005007 /* Use the local node if we haven't already */
5008 if (!node_isset(node, *used_node_mask)) {
5009 node_set(node, *used_node_mask);
5010 return node;
5011 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005012
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08005013 for_each_node_state(n, N_MEMORY) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005014
5015 /* Don't want a node to appear more than once */
5016 if (node_isset(n, *used_node_mask))
5017 continue;
5018
Linus Torvalds1da177e2005-04-16 15:20:36 -07005019 /* Use the distance array to find the distance */
5020 val = node_distance(node, n);
5021
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01005022 /* Penalize nodes under us ("prefer the next node") */
5023 val += (n < node);
5024
Linus Torvalds1da177e2005-04-16 15:20:36 -07005025 /* Give preference to headless and unused nodes */
Rusty Russella70f7302009-03-13 14:49:46 +10305026 tmp = cpumask_of_node(n);
5027 if (!cpumask_empty(tmp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005028 val += PENALTY_FOR_NODE_WITH_CPUS;
5029
5030 /* Slight preference for less loaded node */
5031 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
5032 val += node_load[n];
5033
5034 if (val < min_val) {
5035 min_val = val;
5036 best_node = n;
5037 }
5038 }
5039
5040 if (best_node >= 0)
5041 node_set(best_node, *used_node_mask);
5042
5043 return best_node;
5044}
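/*
 * A rough worked example (illustrative only, not part of the original
 * source): on a two-node machine with node_distance(0, 1) == 20, all
 * node_load[] entries zero and CPUs present on both nodes, building the
 * fallback list for node 0 first returns node 0 itself via the local-node
 * shortcut above; the next call scores node 1 as
 *
 *	val  = 20 + 0 + PENALTY_FOR_NODE_WITH_CPUS;
 *	val  = val * (MAX_NODE_LOAD * MAX_NUMNODES) + node_load[1];
 *
 * and, being the only remaining candidate, node 1 becomes the next best
 * node.
 */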
5045
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005046
5047/*
5048 * Build zonelists ordered by node and zones within node.
5049 * This results in maximum locality--normal zone overflows into local
5050 * DMA zone, if any--but risks exhausting DMA zone.
5051 */
Michal Hocko9d3be212017-09-06 16:20:30 -07005052static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5053 unsigned nr_nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005054{
Michal Hocko9d3be212017-09-06 16:20:30 -07005055 struct zoneref *zonerefs;
5056 int i;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005057
Michal Hocko9d3be212017-09-06 16:20:30 -07005058 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5059
5060 for (i = 0; i < nr_nodes; i++) {
5061 int nr_zones;
5062
5063 pg_data_t *node = NODE_DATA(node_order[i]);
5064
5065 nr_zones = build_zonerefs_node(node, zonerefs);
5066 zonerefs += nr_zones;
5067 }
5068 zonerefs->zone = NULL;
5069 zonerefs->zone_idx = 0;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005070}
5071
5072/*
Christoph Lameter523b9452007-10-16 01:25:37 -07005073 * Build gfp_thisnode zonelists
5074 */
5075static void build_thisnode_zonelists(pg_data_t *pgdat)
5076{
Michal Hocko9d3be212017-09-06 16:20:30 -07005077 struct zoneref *zonerefs;
5078 int nr_zones;
Christoph Lameter523b9452007-10-16 01:25:37 -07005079
Michal Hocko9d3be212017-09-06 16:20:30 -07005080 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5081 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5082 zonerefs += nr_zones;
5083 zonerefs->zone = NULL;
5084 zonerefs->zone_idx = 0;
Christoph Lameter523b9452007-10-16 01:25:37 -07005085}
5086
5087/*
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005088 * Build zonelists ordered by zone and nodes within zones.
5089 * This results in conserving DMA zone[s] until all Normal memory is
5090 * exhausted, but results in overflowing to remote node while memory
5091 * may still exist in local DMA zone.
5092 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005093
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005094static void build_zonelists(pg_data_t *pgdat)
5095{
Michal Hocko9d3be212017-09-06 16:20:30 -07005096 static int node_order[MAX_NUMNODES];
5097 int node, load, nr_nodes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005098 nodemask_t used_mask;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005099 int local_node, prev_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005100
5101 /* NUMA-aware ordering of nodes */
5102 local_node = pgdat->node_id;
Christoph Lameter62bc62a2009-06-16 15:32:15 -07005103 load = nr_online_nodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005104 prev_node = local_node;
5105 nodes_clear(used_mask);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005106
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005107 memset(node_order, 0, sizeof(node_order));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005108 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5109 /*
5110 * We don't want to pressure a particular node.
5111		 * So add a penalty to the first node in the same
5112		 * distance group to make it round-robin.
5113 */
David Rientjes957f8222012-10-08 16:33:24 -07005114 if (node_distance(local_node, node) !=
5115 node_distance(local_node, prev_node))
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005116 node_load[node] = load;
5117
Michal Hocko9d3be212017-09-06 16:20:30 -07005118 node_order[nr_nodes++] = node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005119 prev_node = node;
5120 load--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005121 }
Christoph Lameter523b9452007-10-16 01:25:37 -07005122
Michal Hocko9d3be212017-09-06 16:20:30 -07005123 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
Christoph Lameter523b9452007-10-16 01:25:37 -07005124 build_thisnode_zonelists(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005125}
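/*
 * Illustrative example (not part of the original source), assuming a
 * two-node machine where each node has a populated DMA32 and Normal zone
 * and node 1 is the best fallback for node 0: the ZONELIST_FALLBACK list
 * built above for node 0 is
 *
 *	node 0 Normal, node 0 DMA32, node 1 Normal, node 1 DMA32
 *
 * (zones are added highest-first by build_zonerefs_node()), while the
 * ZONELIST_NOFALLBACK list contains only node 0's own zones.
 */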
5126
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07005127#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5128/*
5129 * Return node id of node used for "local" allocations.
5130 * I.e., first node id of first zone in arg node's generic zonelist.
5131 * Used for initializing percpu 'numa_mem', which is used primarily
5132 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5133 */
5134int local_memory_node(int node)
5135{
Mel Gormanc33d6c02016-05-19 17:14:10 -07005136 struct zoneref *z;
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07005137
Mel Gormanc33d6c02016-05-19 17:14:10 -07005138 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07005139 gfp_zone(GFP_KERNEL),
Mel Gormanc33d6c02016-05-19 17:14:10 -07005140 NULL);
5141 return z->zone->node;
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07005142}
5143#endif
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005144
Joonsoo Kim6423aa82016-08-10 16:27:49 -07005145static void setup_min_unmapped_ratio(void);
5146static void setup_min_slab_ratio(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005147#else /* CONFIG_NUMA */
5148
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005149static void build_zonelists(pg_data_t *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005150{
Christoph Lameter19655d32006-09-25 23:31:19 -07005151 int node, local_node;
Michal Hocko9d3be212017-09-06 16:20:30 -07005152 struct zoneref *zonerefs;
5153 int nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005154
5155 local_node = pgdat->node_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005156
Michal Hocko9d3be212017-09-06 16:20:30 -07005157 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5158 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5159 zonerefs += nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005160
Mel Gorman54a6eb52008-04-28 02:12:16 -07005161 /*
5162 * Now we build the zonelist so that it contains the zones
5163 * of all the other nodes.
5164 * We don't want to pressure a particular node, so when
5165 * building the zones for node N, we make sure that the
5166 * zones coming right after the local ones are those from
5167 * node N+1 (modulo N)
5168 */
5169 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5170 if (!node_online(node))
5171 continue;
Michal Hocko9d3be212017-09-06 16:20:30 -07005172 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5173 zonerefs += nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005174 }
Mel Gorman54a6eb52008-04-28 02:12:16 -07005175 for (node = 0; node < local_node; node++) {
5176 if (!node_online(node))
5177 continue;
Michal Hocko9d3be212017-09-06 16:20:30 -07005178 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5179 zonerefs += nr_zones;
Mel Gorman54a6eb52008-04-28 02:12:16 -07005180 }
5181
Michal Hocko9d3be212017-09-06 16:20:30 -07005182 zonerefs->zone = NULL;
5183 zonerefs->zone_idx = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005184}
5185
5186#endif /* CONFIG_NUMA */
5187
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005188/*
5189 * Boot pageset table. One per cpu which is going to be used for all
5190 * zones and all nodes. The parameters will be set in such a way
5191 * that an item put on a list will immediately be handed over to
5192 * the buddy list. This is safe since pageset manipulation is done
5193 * with interrupts disabled.
5194 *
5195 * The boot_pagesets must be kept even after bootup is complete for
5196 * unused processors and/or zones. They do play a role for bootstrapping
5197 * hotplugged processors.
5198 *
5199 * zoneinfo_show() and maybe other functions do
5200 * not check if the processor is online before following the pageset pointer.
5201 * Other parts of the kernel may not check if the zone is available.
5202 */
5203static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
5204static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
Johannes Weiner385386c2017-07-06 15:40:43 -07005205static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005206
Michal Hocko11cd8632017-09-06 16:20:34 -07005207static void __build_all_zonelists(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208{
Yasunori Goto68113782006-06-23 02:03:11 -07005209 int nid;
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005210 int __maybe_unused cpu;
Jiang Liu9adb62a2012-07-31 16:43:28 -07005211 pg_data_t *self = data;
Michal Hockob93e0f32017-09-06 16:20:37 -07005212 static DEFINE_SPINLOCK(lock);
5213
5214 spin_lock(&lock);
Paul Jackson9276b1bc2006-12-06 20:31:48 -08005215
Bo Liu7f9cfb32009-08-18 14:11:19 -07005216#ifdef CONFIG_NUMA
5217 memset(node_load, 0, sizeof(node_load));
5218#endif
Jiang Liu9adb62a2012-07-31 16:43:28 -07005219
Wei Yangc1152582017-09-06 16:19:33 -07005220 /*
5221	 * This node was hot-added and no memory is present yet. So just
5222 * building zonelists is fine - no need to touch other nodes.
5223 */
Jiang Liu9adb62a2012-07-31 16:43:28 -07005224 if (self && !node_online(self->node_id)) {
5225 build_zonelists(self);
Wei Yangc1152582017-09-06 16:19:33 -07005226 } else {
5227 for_each_online_node(nid) {
5228 pg_data_t *pgdat = NODE_DATA(nid);
Jiang Liu9adb62a2012-07-31 16:43:28 -07005229
Wei Yangc1152582017-09-06 16:19:33 -07005230 build_zonelists(pgdat);
5231 }
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005232
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005233#ifdef CONFIG_HAVE_MEMORYLESS_NODES
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005234 /*
5235 * We now know the "local memory node" for each node--
5236 * i.e., the node of the first zone in the generic zonelist.
5237 * Set up numa_mem percpu variable for on-line cpus. During
5238 * boot, only the boot cpu should be on-line; we'll init the
5239 * secondary cpus' numa_mem as they come on-line. During
5240 * node/memory hotplug, we'll fixup all on-line cpus.
5241 */
Michal Hockod9c9a0b2017-09-06 16:20:20 -07005242 for_each_online_cpu(cpu)
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005243 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005244#endif
Michal Hockod9c9a0b2017-09-06 16:20:20 -07005245 }
Michal Hockob93e0f32017-09-06 16:20:37 -07005246
5247 spin_unlock(&lock);
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005248}
5249
5250static noinline void __init
5251build_all_zonelists_init(void)
5252{
5253 int cpu;
5254
5255 __build_all_zonelists(NULL);
5256
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005257 /*
5258 * Initialize the boot_pagesets that are going to be used
5259 * for bootstrapping processors. The real pagesets for
5260 * each zone will be allocated later when the per cpu
5261 * allocator is available.
5262 *
5263 * boot_pagesets are used also for bootstrapping offline
5264 * cpus if the system is already booted because the pagesets
5265 * are needed to initialize allocators on a specific cpu too.
5266 * F.e. the percpu allocator needs the page allocator which
5267 * needs the percpu allocator in order to allocate its pagesets
5268 * (a chicken-egg dilemma).
5269 */
Michal Hockoafb6ebb2017-09-06 16:20:17 -07005270 for_each_possible_cpu(cpu)
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005271 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
5272
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08005273 mminit_verify_zonelist();
5274 cpuset_init_current_mems_allowed();
5275}
5276
Haicheng Li4eaf3f62010-05-24 14:32:52 -07005277/*
Haicheng Li4eaf3f62010-05-24 14:32:52 -07005278 * May be called at runtime (e.g. on memory hotplug) as well as when system_state == SYSTEM_BOOTING.
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08005279 *
Michal Hocko72675e12017-09-06 16:20:24 -07005280 * __ref due to call of __init annotated helper build_all_zonelists_init
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08005281 * [protected by SYSTEM_BOOTING].
Haicheng Li4eaf3f62010-05-24 14:32:52 -07005282 */
Michal Hocko72675e12017-09-06 16:20:24 -07005283void __ref build_all_zonelists(pg_data_t *pgdat)
Yasunori Goto68113782006-06-23 02:03:11 -07005284{
5285 if (system_state == SYSTEM_BOOTING) {
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08005286 build_all_zonelists_init();
Yasunori Goto68113782006-06-23 02:03:11 -07005287 } else {
Michal Hocko11cd8632017-09-06 16:20:34 -07005288 __build_all_zonelists(pgdat);
Yasunori Goto68113782006-06-23 02:03:11 -07005289 /* cpuset refresh routine should be here */
5290 }
Andrew Mortonbd1e22b2006-06-23 02:03:47 -07005291 vm_total_pages = nr_free_pagecache_pages();
Mel Gorman9ef9acb2007-10-16 01:25:54 -07005292 /*
5293 * Disable grouping by mobility if the number of pages in the
5294 * system is too low to allow the mechanism to work. It would be
5295 * more accurate, but expensive to check per-zone. This check is
5296 * made on memory-hotadd so a system can start with mobility
5297 * disabled and enable it later
5298 */
Mel Gormand9c23402007-10-16 01:26:01 -07005299 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
Mel Gorman9ef9acb2007-10-16 01:25:54 -07005300 page_group_by_mobility_disabled = 1;
5301 else
5302 page_group_by_mobility_disabled = 0;
5303
Michal Hockoc9bff3e2017-09-06 16:20:13 -07005304 pr_info("Built %i zonelists, mobility grouping %s. Total pages: %ld\n",
Joe Perches756a0252016-03-17 14:19:47 -07005305 nr_online_nodes,
Joe Perches756a0252016-03-17 14:19:47 -07005306 page_group_by_mobility_disabled ? "off" : "on",
5307 vm_total_pages);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005308#ifdef CONFIG_NUMA
Anton Blanchardf88dfff2014-12-10 15:42:53 -08005309 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005310#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005311}
5312
5313/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07005314 * Initially all pages are reserved - free ones are freed
5315 * up by free_all_bootmem() once the early boot process is
5316 * done. Non-atomic initialization, single-pass.
5317 */
Matt Tolentinoc09b4242006-01-17 07:03:44 +01005318void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
Dave Hansena2f3aa022007-01-10 23:15:30 -08005319 unsigned long start_pfn, enum memmap_context context)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005320{
Dan Williams4b94ffd2016-01-15 16:56:22 -08005321 struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
Andy Whitcroft29751f62005-06-23 00:08:00 -07005322 unsigned long end_pfn = start_pfn + size;
Dan Williams4b94ffd2016-01-15 16:56:22 -08005323 pg_data_t *pgdat = NODE_DATA(nid);
Andy Whitcroft29751f62005-06-23 00:08:00 -07005324 unsigned long pfn;
Mel Gorman3a80a7f2015-06-30 14:57:02 -07005325 unsigned long nr_initialised = 0;
Taku Izumi342332e2016-03-15 14:55:22 -07005326#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5327 struct memblock_region *r = NULL, *tmp;
5328#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005329
Hugh Dickins22b31ee2009-01-06 14:40:09 -08005330 if (highest_memmap_pfn < end_pfn - 1)
5331 highest_memmap_pfn = end_pfn - 1;
5332
Dan Williams4b94ffd2016-01-15 16:56:22 -08005333 /*
5334 * Honor reservation requested by the driver for this ZONE_DEVICE
5335 * memory
5336 */
5337 if (altmap && start_pfn == altmap->base_pfn)
5338 start_pfn += altmap->reserve;
5339
Greg Ungerercbe8dd42006-01-12 01:05:24 -08005340 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
Dave Hansena2f3aa022007-01-10 23:15:30 -08005341 /*
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005342 * There can be holes in boot-time mem_map[]s handed to this
5343 * function. They do not exist on hotplugged memory.
Dave Hansena2f3aa022007-01-10 23:15:30 -08005344 */
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005345 if (context != MEMMAP_EARLY)
5346 goto not_early;
5347
Paul Burtonb92df1d2017-02-22 15:44:53 -08005348 if (!early_pfn_valid(pfn)) {
5349#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5350 /*
5351 * Skip to the pfn preceding the next valid one (or
5352 * end_pfn), such that we hit a valid pfn (or end_pfn)
5353 * on our next iteration of the loop.
5354 */
5355 pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
5356#endif
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005357 continue;
Paul Burtonb92df1d2017-02-22 15:44:53 -08005358 }
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005359 if (!early_pfn_in_nid(pfn, nid))
5360 continue;
5361 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
5362 break;
Taku Izumi342332e2016-03-15 14:55:22 -07005363
5364#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005365 /*
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005366		 * Check the memblock attributes given by firmware, which can affect
5367		 * the kernel memory layout. If zone==ZONE_MOVABLE but the memory is
5368		 * mirrored, it's an overlapped memmap init; skip it.
5369 */
5370 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5371 if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
5372 for_each_memblock(memory, tmp)
5373 if (pfn < memblock_region_memory_end_pfn(tmp))
5374 break;
5375 r = tmp;
Taku Izumi342332e2016-03-15 14:55:22 -07005376 }
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005377 if (pfn >= memblock_region_memory_base_pfn(r) &&
5378 memblock_is_mirror(r)) {
5379 /* already initialized as NORMAL */
5380 pfn = memblock_region_memory_end_pfn(r);
5381 continue;
5382 }
Dave Hansena2f3aa022007-01-10 23:15:30 -08005383 }
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005384#endif
Mel Gormanac5d2532015-06-30 14:57:20 -07005385
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005386not_early:
Mel Gormanac5d2532015-06-30 14:57:20 -07005387 /*
5388 * Mark the block movable so that blocks are reserved for
5389 * movable at startup. This will force kernel allocations
5390 * to reserve their blocks rather than leaking throughout
5391 * the address space during boot when many long-lived
Mel Gorman974a7862015-11-06 16:28:34 -08005392 * kernel allocations are made.
Mel Gormanac5d2532015-06-30 14:57:20 -07005393 *
5394		 * The pageblock bitmap is created for the zone's valid pfn range, but the
5395		 * memmap can be created for invalid pages (for alignment), so check
5396		 * here that set_pageblock_migratetype() is not called against a pfn
5397		 * outside the zone.
5398 */
5399 if (!(pfn & (pageblock_nr_pages - 1))) {
5400 struct page *page = pfn_to_page(pfn);
5401
5402 __init_single_page(page, pfn, zone, nid);
5403 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
Michal Hocko9b6e63c2017-10-03 16:16:19 -07005404 cond_resched();
Mel Gormanac5d2532015-06-30 14:57:20 -07005405 } else {
5406 __init_single_pfn(pfn, zone, nid);
5407 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005408 }
5409}
5410
Andi Kleen1e548de2008-02-04 22:29:26 -08005411static void __meminit zone_init_free_lists(struct zone *zone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005412{
Mel Gorman7aeb09f2014-06-04 16:10:21 -07005413 unsigned int order, t;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07005414 for_each_migratetype_order(order, t) {
5415 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005416 zone->free_area[order].nr_free = 0;
5417 }
5418}
5419
5420#ifndef __HAVE_ARCH_MEMMAP_INIT
5421#define memmap_init(size, nid, zone, start_pfn) \
Dave Hansena2f3aa022007-01-10 23:15:30 -08005422 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005423#endif
5424
David Rientjes7cd2b0a2014-06-23 13:22:04 -07005425static int zone_batchsize(struct zone *zone)
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005426{
David Howells3a6be872009-05-06 16:03:03 -07005427#ifdef CONFIG_MMU
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005428 int batch;
5429
5430 /*
5431	 * The per-cpu-pages pools are set to around 1/1000th of the
Seth, Rohitba56e912005-10-29 18:15:47 -07005432	 * size of the zone, but to no more than 1/2 of a meg.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005433 *
5434 * OK, so we don't know how big the cache is. So guess.
5435 */
Jiang Liub40da042013-02-22 16:33:52 -08005436 batch = zone->managed_pages / 1024;
Seth, Rohitba56e912005-10-29 18:15:47 -07005437 if (batch * PAGE_SIZE > 512 * 1024)
5438 batch = (512 * 1024) / PAGE_SIZE;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005439 batch /= 4; /* We effectively *= 4 below */
5440 if (batch < 1)
5441 batch = 1;
5442
5443 /*
Nick Piggin0ceaacc2005-12-04 13:55:25 +11005444 * Clamp the batch to a 2^n - 1 value. Having a power
5445 * of 2 value was found to be more likely to have
5446 * suboptimal cache aliasing properties in some cases.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005447 *
Nick Piggin0ceaacc2005-12-04 13:55:25 +11005448 * For example if 2 tasks are alternately allocating
5449 * batches of pages, one task can end up with a lot
5450 * of pages of one half of the possible page colors
5451 * and the other with pages of the other colors.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005452 */
David Howells91552032009-05-06 16:03:02 -07005453 batch = rounddown_pow_of_two(batch + batch/2) - 1;
Seth, Rohitba56e912005-10-29 18:15:47 -07005454
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005455 return batch;
David Howells3a6be872009-05-06 16:03:03 -07005456
5457#else
5458 /* The deferral and batching of frees should be suppressed under NOMMU
5459 * conditions.
5460 *
5461 * The problem is that NOMMU needs to be able to allocate large chunks
5462 * of contiguous memory as there's no hardware page translation to
5463 * assemble apparent contiguous memory from discontiguous pages.
5464 *
5465 * Queueing large contiguous runs of pages for batching, however,
5466 * causes the pages to actually be freed in smaller chunks. As there
5467 * can be a significant delay between the individual batches being
5468 * recycled, this leads to the once large chunks of space being
5469 * fragmented and becoming unavailable for high-order allocations.
5470 */
5471 return 0;
5472#endif
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005473}
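/*
 * A rough worked example of the sizing above (illustrative only, not part
 * of the original source), assuming an MMU kernel with 4KiB pages and a
 * zone managing about 1GiB (262144 pages):
 *
 *	batch = 262144 / 1024 = 256
 *	256 * PAGE_SIZE = 1MiB > 512KiB, so batch = (512 * 1024) / PAGE_SIZE = 128
 *	batch /= 4, so batch = 32
 *	rounddown_pow_of_two(32 + 16) - 1 = 31
 *
 * so pages move between the per-cpu lists and the buddy lists roughly 31
 * at a time, and pageset_set_batch() below defaults pcp->high to 6 * 31.
 */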
5474
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005475/*
5476 * pcp->high and pcp->batch values are related and dependent on one another:
5477	 * ->batch must never be higher than ->high.
5478 * The following function updates them in a safe manner without read side
5479 * locking.
5480 *
5481 * Any new users of pcp->batch and pcp->high should ensure they can cope with
5482 * those fields changing asynchronously (acording the the above rule).
5483	 * those fields changing asynchronously (according to the above rule).
5484 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5485 * outside of boot time (or some other assurance that no concurrent updaters
5486 * exist).
5487 */
5488static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5489 unsigned long batch)
5490{
5491 /* start with a fail safe value for batch */
5492 pcp->batch = 1;
5493 smp_wmb();
5494
5495 /* Update high, then batch, in order */
5496 pcp->high = high;
5497 smp_wmb();
5498
5499 pcp->batch = batch;
5500}
5501
Cody P Schafer36640332013-07-03 15:01:40 -07005502/* a companion to pageset_set_high() */
Cody P Schafer4008bab2013-07-03 15:01:28 -07005503static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
5504{
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005505 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
Cody P Schafer4008bab2013-07-03 15:01:28 -07005506}
5507
Cody P Schafer88c90db2013-07-03 15:01:35 -07005508static void pageset_init(struct per_cpu_pageset *p)
Christoph Lameter2caaad42005-06-21 17:15:00 -07005509{
5510 struct per_cpu_pages *pcp;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07005511 int migratetype;
Christoph Lameter2caaad42005-06-21 17:15:00 -07005512
Magnus Damm1c6fe942005-10-26 01:58:59 -07005513 memset(p, 0, sizeof(*p));
5514
Christoph Lameter3dfa5722008-02-04 22:29:19 -08005515 pcp = &p->pcp;
Christoph Lameter2caaad42005-06-21 17:15:00 -07005516 pcp->count = 0;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07005517 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
5518 INIT_LIST_HEAD(&pcp->lists[migratetype]);
Christoph Lameter2caaad42005-06-21 17:15:00 -07005519}
5520
Cody P Schafer88c90db2013-07-03 15:01:35 -07005521static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
5522{
5523 pageset_init(p);
5524 pageset_set_batch(p, batch);
5525}
5526
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005527/*
Cody P Schafer36640332013-07-03 15:01:40 -07005528 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005529 * to the value high for the pageset p.
5530 */
Cody P Schafer36640332013-07-03 15:01:40 -07005531static void pageset_set_high(struct per_cpu_pageset *p,
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005532 unsigned long high)
5533{
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005534 unsigned long batch = max(1UL, high / 4);
5535 if ((high / 4) > (PAGE_SHIFT * 8))
5536 batch = PAGE_SHIFT * 8;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005537
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005538 pageset_update(&p->pcp, high, batch);
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005539}
5540
David Rientjes7cd2b0a2014-06-23 13:22:04 -07005541static void pageset_set_high_and_batch(struct zone *zone,
5542 struct per_cpu_pageset *pcp)
Cody P Schafer56cef2b2013-07-03 15:01:38 -07005543{
Cody P Schafer56cef2b2013-07-03 15:01:38 -07005544 if (percpu_pagelist_fraction)
Cody P Schafer36640332013-07-03 15:01:40 -07005545 pageset_set_high(pcp,
Cody P Schafer56cef2b2013-07-03 15:01:38 -07005546 (zone->managed_pages /
5547 percpu_pagelist_fraction));
5548 else
5549 pageset_set_batch(pcp, zone_batchsize(zone));
5550}
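/*
 * Illustrative example (not part of the original source): with the
 * percpu_pagelist_fraction sysctl set to 8, a zone managing 262144 pages
 * gets pcp->high = 262144 / 8 = 32768, and pageset_set_high() clamps the
 * batch at PAGE_SHIFT * 8 (96 with 4KiB pages) because 32768 / 4 exceeds
 * that limit. With the fraction left at 0, the zone_batchsize() heuristic
 * above is used instead.
 */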
5551
Cody P Schafer169f6c12013-07-03 15:01:41 -07005552static void __meminit zone_pageset_init(struct zone *zone, int cpu)
5553{
5554 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
5555
5556 pageset_init(pcp);
5557 pageset_set_high_and_batch(zone, pcp);
5558}
5559
Michal Hocko72675e12017-09-06 16:20:24 -07005560void __meminit setup_zone_pageset(struct zone *zone)
Wu Fengguang319774e2010-05-24 14:32:49 -07005561{
5562 int cpu;
Wu Fengguang319774e2010-05-24 14:32:49 -07005563 zone->pageset = alloc_percpu(struct per_cpu_pageset);
Cody P Schafer56cef2b2013-07-03 15:01:38 -07005564 for_each_possible_cpu(cpu)
5565 zone_pageset_init(zone, cpu);
Wu Fengguang319774e2010-05-24 14:32:49 -07005566}
5567
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005568/*
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005569 * Allocate per cpu pagesets and initialize them.
5570 * Before this call only boot pagesets were available.
Christoph Lameter2caaad42005-06-21 17:15:00 -07005571 */
Al Viro78d99552005-12-15 09:18:25 +00005572void __init setup_per_cpu_pageset(void)
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005573{
Mel Gormanb4911ea2016-08-04 15:31:49 -07005574 struct pglist_data *pgdat;
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005575 struct zone *zone;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005576
Wu Fengguang319774e2010-05-24 14:32:49 -07005577 for_each_populated_zone(zone)
5578 setup_zone_pageset(zone);
Mel Gormanb4911ea2016-08-04 15:31:49 -07005579
5580 for_each_online_pgdat(pgdat)
5581 pgdat->per_cpu_nodestats =
5582 alloc_percpu(struct per_cpu_nodestat);
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005583}
5584
Matt Tolentinoc09b4242006-01-17 07:03:44 +01005585static __meminit void zone_pcp_init(struct zone *zone)
Dave Hansened8ece22005-10-29 18:16:50 -07005586{
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005587 /*
5588 * per cpu subsystem is not up at this point. The following code
5589 * relies on the ability of the linker to provide the
5590 * offset of a (static) per cpu variable into the per cpu area.
5591 */
5592 zone->pageset = &boot_pageset;
Dave Hansened8ece22005-10-29 18:16:50 -07005593
Xishi Qiub38a8722013-11-12 15:07:20 -08005594 if (populated_zone(zone))
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005595 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
5596 zone->name, zone->present_pages,
5597 zone_batchsize(zone));
Dave Hansened8ece22005-10-29 18:16:50 -07005598}
5599
Michal Hockodc0bbf32017-07-06 15:37:35 -07005600void __meminit init_currently_empty_zone(struct zone *zone,
Yasunori Goto718127c2006-06-23 02:03:10 -07005601 unsigned long zone_start_pfn,
Yaowei Baib171e402015-11-05 18:47:06 -08005602 unsigned long size)
Dave Hansened8ece22005-10-29 18:16:50 -07005603{
5604 struct pglist_data *pgdat = zone->zone_pgdat;
Linus Torvalds9dcb8b62016-10-26 10:15:30 -07005605
Dave Hansened8ece22005-10-29 18:16:50 -07005606 pgdat->nr_zones = zone_idx(zone) + 1;
5607
Dave Hansened8ece22005-10-29 18:16:50 -07005608 zone->zone_start_pfn = zone_start_pfn;
5609
Mel Gorman708614e2008-07-23 21:26:51 -07005610 mminit_dprintk(MMINIT_TRACE, "memmap_init",
5611 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
5612 pgdat->node_id,
5613 (unsigned long)zone_idx(zone),
5614 zone_start_pfn, (zone_start_pfn + size));
5615
Andi Kleen1e548de2008-02-04 22:29:26 -08005616 zone_init_free_lists(zone);
Linus Torvalds9dcb8b62016-10-26 10:15:30 -07005617 zone->initialized = 1;
Dave Hansened8ece22005-10-29 18:16:50 -07005618}
5619
Tejun Heo0ee332c2011-12-08 10:22:09 -08005620#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Mel Gormanc7132162006-09-27 01:49:43 -07005621#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
Mel Gorman8a942fd2015-06-30 14:56:55 -07005622
Mel Gormanc7132162006-09-27 01:49:43 -07005623/*
5624 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
Mel Gormanc7132162006-09-27 01:49:43 -07005625 */
Mel Gorman8a942fd2015-06-30 14:56:55 -07005626int __meminit __early_pfn_to_nid(unsigned long pfn,
5627 struct mminit_pfnnid_cache *state)
Mel Gormanc7132162006-09-27 01:49:43 -07005628{
Tejun Heoc13291a2011-07-12 10:46:30 +02005629 unsigned long start_pfn, end_pfn;
Yinghai Lue76b63f2013-09-11 14:22:17 -07005630 int nid;
Russ Anderson7c243c72013-04-29 15:07:59 -07005631
Mel Gorman8a942fd2015-06-30 14:56:55 -07005632 if (state->last_start <= pfn && pfn < state->last_end)
5633 return state->last_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005634
Yinghai Lue76b63f2013-09-11 14:22:17 -07005635 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
5636 if (nid != -1) {
Mel Gorman8a942fd2015-06-30 14:56:55 -07005637 state->last_start = start_pfn;
5638 state->last_end = end_pfn;
5639 state->last_nid = nid;
Yinghai Lue76b63f2013-09-11 14:22:17 -07005640 }
5641
5642 return nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005643}
5644#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
5645
Mel Gormanc7132162006-09-27 01:49:43 -07005646/**
Santosh Shilimkar67828322014-01-21 15:50:25 -08005647 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005648 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
Santosh Shilimkar67828322014-01-21 15:50:25 -08005649 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
Mel Gormanc7132162006-09-27 01:49:43 -07005650 *
Zhang Zhen7d018172014-06-04 16:10:53 -07005651 * If an architecture guarantees that all ranges registered contain no holes
5652	 * and may be freed, this function may be used instead of calling
5653 * memblock_free_early_nid() manually.
Mel Gormanc7132162006-09-27 01:49:43 -07005654 */
Tejun Heoc13291a2011-07-12 10:46:30 +02005655void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07005656{
Tejun Heoc13291a2011-07-12 10:46:30 +02005657 unsigned long start_pfn, end_pfn;
5658 int i, this_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005659
Tejun Heoc13291a2011-07-12 10:46:30 +02005660 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
5661 start_pfn = min(start_pfn, max_low_pfn);
5662 end_pfn = min(end_pfn, max_low_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005663
Tejun Heoc13291a2011-07-12 10:46:30 +02005664 if (start_pfn < end_pfn)
Santosh Shilimkar67828322014-01-21 15:50:25 -08005665 memblock_free_early_nid(PFN_PHYS(start_pfn),
5666 (end_pfn - start_pfn) << PAGE_SHIFT,
5667 this_nid);
Mel Gormanc7132162006-09-27 01:49:43 -07005668 }
5669}
5670
5671/**
5672 * sparse_memory_present_with_active_regions - Call memory_present for each active range
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005673 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
Mel Gormanc7132162006-09-27 01:49:43 -07005674 *
Zhang Zhen7d018172014-06-04 16:10:53 -07005675 * If an architecture guarantees that all ranges registered contain no holes and may
5676 * be freed, this function may be used instead of calling memory_present() manually.
Mel Gormanc7132162006-09-27 01:49:43 -07005677 */
5678void __init sparse_memory_present_with_active_regions(int nid)
5679{
Tejun Heoc13291a2011-07-12 10:46:30 +02005680 unsigned long start_pfn, end_pfn;
5681 int i, this_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005682
Tejun Heoc13291a2011-07-12 10:46:30 +02005683 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
5684 memory_present(this_nid, start_pfn, end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005685}
5686
5687/**
5688 * get_pfn_range_for_nid - Return the start and end page frames for a node
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005689 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
5690 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
5691 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
Mel Gormanc7132162006-09-27 01:49:43 -07005692 *
5693 * It returns the start and end page frame of a node based on information
Zhang Zhen7d018172014-06-04 16:10:53 -07005694 * provided by memblock_set_node(). If called for a node
Mel Gormanc7132162006-09-27 01:49:43 -07005695 * with no available memory, a warning is printed and the start and end
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005696 * PFNs will be 0.
Mel Gormanc7132162006-09-27 01:49:43 -07005697 */
Yasunori Gotoa3142c82007-05-08 00:23:07 -07005698void __meminit get_pfn_range_for_nid(unsigned int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005699 unsigned long *start_pfn, unsigned long *end_pfn)
5700{
Tejun Heoc13291a2011-07-12 10:46:30 +02005701 unsigned long this_start_pfn, this_end_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07005702 int i;
Tejun Heoc13291a2011-07-12 10:46:30 +02005703
Mel Gormanc7132162006-09-27 01:49:43 -07005704 *start_pfn = -1UL;
5705 *end_pfn = 0;
5706
Tejun Heoc13291a2011-07-12 10:46:30 +02005707 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
5708 *start_pfn = min(*start_pfn, this_start_pfn);
5709 *end_pfn = max(*end_pfn, this_end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005710 }
5711
Christoph Lameter633c0662007-10-16 01:25:37 -07005712 if (*start_pfn == -1UL)
Mel Gormanc7132162006-09-27 01:49:43 -07005713 *start_pfn = 0;
Mel Gormanc7132162006-09-27 01:49:43 -07005714}
5715
5716/*
Mel Gorman2a1e2742007-07-17 04:03:12 -07005717 * This finds a zone that can be used for ZONE_MOVABLE pages. The
5718	 * assumption is made that zones within a node are ordered in monotonically
5719	 * increasing memory addresses so that the "highest" populated zone is used.
5720 */
Adrian Bunkb69a7282008-07-23 21:28:12 -07005721static void __init find_usable_zone_for_movable(void)
Mel Gorman2a1e2742007-07-17 04:03:12 -07005722{
5723 int zone_index;
5724 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
5725 if (zone_index == ZONE_MOVABLE)
5726 continue;
5727
5728 if (arch_zone_highest_possible_pfn[zone_index] >
5729 arch_zone_lowest_possible_pfn[zone_index])
5730 break;
5731 }
5732
5733 VM_BUG_ON(zone_index == -1);
5734 movable_zone = zone_index;
5735}
5736
5737/*
5738 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005739 * because it is sized independently of the architecture. Unlike the other zones,
Mel Gorman2a1e2742007-07-17 04:03:12 -07005740 * the starting point for ZONE_MOVABLE is not fixed. It may be different
5741 * in each node depending on the size of each node and how evenly kernelcore
5742 * is distributed. This helper function adjusts the zone ranges
5743 * provided by the architecture for a given node by using the end of the
5744 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
5745	 * zones within a node are in order of monotonically increasing memory addresses.
5746 */
Adrian Bunkb69a7282008-07-23 21:28:12 -07005747static void __meminit adjust_zone_range_for_zone_movable(int nid,
Mel Gorman2a1e2742007-07-17 04:03:12 -07005748 unsigned long zone_type,
5749 unsigned long node_start_pfn,
5750 unsigned long node_end_pfn,
5751 unsigned long *zone_start_pfn,
5752 unsigned long *zone_end_pfn)
5753{
5754 /* Only adjust if ZONE_MOVABLE is on this node */
5755 if (zone_movable_pfn[nid]) {
5756 /* Size ZONE_MOVABLE */
5757 if (zone_type == ZONE_MOVABLE) {
5758 *zone_start_pfn = zone_movable_pfn[nid];
5759 *zone_end_pfn = min(node_end_pfn,
5760 arch_zone_highest_possible_pfn[movable_zone]);
5761
Xishi Qiue506b992016-10-07 16:58:06 -07005762 /* Adjust for ZONE_MOVABLE starting within this range */
5763 } else if (!mirrored_kernelcore &&
5764 *zone_start_pfn < zone_movable_pfn[nid] &&
5765 *zone_end_pfn > zone_movable_pfn[nid]) {
5766 *zone_end_pfn = zone_movable_pfn[nid];
5767
Mel Gorman2a1e2742007-07-17 04:03:12 -07005768 /* Check if this whole range is within ZONE_MOVABLE */
5769 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
5770 *zone_start_pfn = *zone_end_pfn;
5771 }
5772}
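/*
 * Illustrative example (not part of the original source), assuming
 * mirrored_kernelcore is off: for a node spanning pfns [0x100000, 0x500000)
 * whose ZONE_NORMAL range covers that whole span, and with
 * zone_movable_pfn[nid] == 0x300000, the helper above trims ZONE_NORMAL to
 * end at pfn 0x300000 and starts ZONE_MOVABLE there (ending at the node end,
 * assuming it does not exceed the highest usable zone), so the two zones
 * split the node at the kernelcore boundary without overlapping.
 */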
5773
5774/*
Mel Gormanc7132162006-09-27 01:49:43 -07005775 * Return the number of pages a zone spans in a node, including holes
5776 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
5777 */
Paul Mundt6ea6e682007-07-15 23:38:20 -07005778static unsigned long __meminit zone_spanned_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005779 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005780 unsigned long node_start_pfn,
5781 unsigned long node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005782 unsigned long *zone_start_pfn,
5783 unsigned long *zone_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005784 unsigned long *ignored)
5785{
Xishi Qiub5685e92015-09-08 15:04:16 -07005786	/* When hot-adding a new node from cpu_up(), the node should be empty */
Xishi Qiuf9126ab2015-08-14 15:35:16 -07005787 if (!node_start_pfn && !node_end_pfn)
5788 return 0;
5789
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005790 /* Get the start and end of the zone */
Taku Izumid91749c2016-03-15 14:55:18 -07005791 *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5792 *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
Mel Gorman2a1e2742007-07-17 04:03:12 -07005793 adjust_zone_range_for_zone_movable(nid, zone_type,
5794 node_start_pfn, node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005795 zone_start_pfn, zone_end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005796
5797 /* Check that this node has pages within the zone's required range */
Taku Izumid91749c2016-03-15 14:55:18 -07005798 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07005799 return 0;
5800
5801 /* Move the zone boundaries inside the node if necessary */
Taku Izumid91749c2016-03-15 14:55:18 -07005802 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
5803 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005804
5805 /* Return the spanned pages */
Taku Izumid91749c2016-03-15 14:55:18 -07005806 return *zone_end_pfn - *zone_start_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07005807}
5808
5809/*
5810 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005811 * then all holes in the requested range will be accounted for.
Mel Gormanc7132162006-09-27 01:49:43 -07005812 */
Yinghai Lu32996252009-12-15 17:59:02 -08005813unsigned long __meminit __absent_pages_in_range(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005814 unsigned long range_start_pfn,
5815 unsigned long range_end_pfn)
5816{
Tejun Heo96e907d2011-07-12 10:46:29 +02005817 unsigned long nr_absent = range_end_pfn - range_start_pfn;
5818 unsigned long start_pfn, end_pfn;
5819 int i;
Mel Gormanc7132162006-09-27 01:49:43 -07005820
Tejun Heo96e907d2011-07-12 10:46:29 +02005821 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5822 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5823 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5824 nr_absent -= end_pfn - start_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07005825 }
Tejun Heo96e907d2011-07-12 10:46:29 +02005826 return nr_absent;
Mel Gormanc7132162006-09-27 01:49:43 -07005827}
5828
5829/**
5830 * absent_pages_in_range - Return number of page frames in holes within a range
5831 * @start_pfn: The start PFN to start searching for holes
5832 * @end_pfn: The end PFN to stop searching for holes
5833 *
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005834 * It returns the number of page frames in memory holes within a range.
Mel Gormanc7132162006-09-27 01:49:43 -07005835 */
5836unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5837 unsigned long end_pfn)
5838{
5839 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5840}
5841
5842/* Return the number of page frames in holes in a zone on a node */
Paul Mundt6ea6e682007-07-15 23:38:20 -07005843static unsigned long __meminit zone_absent_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005844 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005845 unsigned long node_start_pfn,
5846 unsigned long node_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005847 unsigned long *ignored)
5848{
Tejun Heo96e907d2011-07-12 10:46:29 +02005849 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5850 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
Mel Gorman9c7cd682006-09-27 01:49:58 -07005851 unsigned long zone_start_pfn, zone_end_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07005852 unsigned long nr_absent;
Mel Gorman9c7cd682006-09-27 01:49:58 -07005853
Xishi Qiub5685e92015-09-08 15:04:16 -07005854	/* When hot-adding a new node from cpu_up(), the node should be empty */
Xishi Qiuf9126ab2015-08-14 15:35:16 -07005855 if (!node_start_pfn && !node_end_pfn)
5856 return 0;
5857
Tejun Heo96e907d2011-07-12 10:46:29 +02005858 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5859 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
Mel Gorman9c7cd682006-09-27 01:49:58 -07005860
Mel Gorman2a1e2742007-07-17 04:03:12 -07005861 adjust_zone_range_for_zone_movable(nid, zone_type,
5862 node_start_pfn, node_end_pfn,
5863 &zone_start_pfn, &zone_end_pfn);
Taku Izumi342332e2016-03-15 14:55:22 -07005864 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5865
5866 /*
5867 * ZONE_MOVABLE handling.
5868	 * Treat pages that will end up in ZONE_MOVABLE but fall within ZONE_NORMAL
5869	 * as absent pages, and vice versa.
5870 */
Xishi Qiue506b992016-10-07 16:58:06 -07005871 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
5872 unsigned long start_pfn, end_pfn;
5873 struct memblock_region *r;
Taku Izumi342332e2016-03-15 14:55:22 -07005874
Xishi Qiue506b992016-10-07 16:58:06 -07005875 for_each_memblock(memory, r) {
5876 start_pfn = clamp(memblock_region_memory_base_pfn(r),
5877 zone_start_pfn, zone_end_pfn);
5878 end_pfn = clamp(memblock_region_memory_end_pfn(r),
5879 zone_start_pfn, zone_end_pfn);
Taku Izumi342332e2016-03-15 14:55:22 -07005880
Xishi Qiue506b992016-10-07 16:58:06 -07005881 if (zone_type == ZONE_MOVABLE &&
5882 memblock_is_mirror(r))
5883 nr_absent += end_pfn - start_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07005884
Xishi Qiue506b992016-10-07 16:58:06 -07005885 if (zone_type == ZONE_NORMAL &&
5886 !memblock_is_mirror(r))
5887 nr_absent += end_pfn - start_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07005888 }
5889 }
5890
5891 return nr_absent;
Mel Gormanc7132162006-09-27 01:49:43 -07005892}
Mel Gorman0e0b8642006-09-27 01:49:56 -07005893
Tejun Heo0ee332c2011-12-08 10:22:09 -08005894#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Paul Mundt6ea6e682007-07-15 23:38:20 -07005895static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005896 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005897 unsigned long node_start_pfn,
5898 unsigned long node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005899 unsigned long *zone_start_pfn,
5900 unsigned long *zone_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005901 unsigned long *zones_size)
5902{
Taku Izumid91749c2016-03-15 14:55:18 -07005903 unsigned int zone;
5904
5905 *zone_start_pfn = node_start_pfn;
5906 for (zone = 0; zone < zone_type; zone++)
5907 *zone_start_pfn += zones_size[zone];
5908
5909 *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
5910
Mel Gormanc7132162006-09-27 01:49:43 -07005911 return zones_size[zone_type];
5912}
5913
Paul Mundt6ea6e682007-07-15 23:38:20 -07005914static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005915 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005916 unsigned long node_start_pfn,
5917 unsigned long node_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005918 unsigned long *zholes_size)
5919{
5920 if (!zholes_size)
5921 return 0;
5922
5923 return zholes_size[zone_type];
5924}
Yinghai Lu20e69262013-03-01 14:51:27 -08005925
Tejun Heo0ee332c2011-12-08 10:22:09 -08005926#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07005927
Yasunori Gotoa3142c82007-05-08 00:23:07 -07005928static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005929 unsigned long node_start_pfn,
5930 unsigned long node_end_pfn,
5931 unsigned long *zones_size,
5932 unsigned long *zholes_size)
Mel Gormanc7132162006-09-27 01:49:43 -07005933{
Gu Zhengfebd5942015-06-24 16:57:02 -07005934 unsigned long realtotalpages = 0, totalpages = 0;
Mel Gormanc7132162006-09-27 01:49:43 -07005935 enum zone_type i;
5936
Gu Zhengfebd5942015-06-24 16:57:02 -07005937 for (i = 0; i < MAX_NR_ZONES; i++) {
5938 struct zone *zone = pgdat->node_zones + i;
Taku Izumid91749c2016-03-15 14:55:18 -07005939 unsigned long zone_start_pfn, zone_end_pfn;
Gu Zhengfebd5942015-06-24 16:57:02 -07005940 unsigned long size, real_size;
Mel Gormanc7132162006-09-27 01:49:43 -07005941
Gu Zhengfebd5942015-06-24 16:57:02 -07005942 size = zone_spanned_pages_in_node(pgdat->node_id, i,
5943 node_start_pfn,
5944 node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005945 &zone_start_pfn,
5946 &zone_end_pfn,
Gu Zhengfebd5942015-06-24 16:57:02 -07005947 zones_size);
5948 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005949 node_start_pfn, node_end_pfn,
5950 zholes_size);
Taku Izumid91749c2016-03-15 14:55:18 -07005951 if (size)
5952 zone->zone_start_pfn = zone_start_pfn;
5953 else
5954 zone->zone_start_pfn = 0;
Gu Zhengfebd5942015-06-24 16:57:02 -07005955 zone->spanned_pages = size;
5956 zone->present_pages = real_size;
5957
5958 totalpages += size;
5959 realtotalpages += real_size;
5960 }
5961
5962 pgdat->node_spanned_pages = totalpages;
Mel Gormanc7132162006-09-27 01:49:43 -07005963 pgdat->node_present_pages = realtotalpages;
5964 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5965 realtotalpages);
5966}
5967
Mel Gorman835c1342007-10-16 01:25:47 -07005968#ifndef CONFIG_SPARSEMEM
5969/*
5970 * Calculate the size of the zone->pageblock_flags bitmap rounded to an unsigned long
Mel Gormand9c23402007-10-16 01:26:01 -07005971 * Start by making sure zonesize is a multiple of pageblock_nr_pages by rounding
5972 * up. Then use NR_PAGEBLOCK_BITS worth of bits per pageblock, and finally
Mel Gorman835c1342007-10-16 01:25:47 -07005973 * round what is now in bits to nearest long in bits, then return it in
5974 * bytes.
5975 */
Linus Torvalds7c455122013-02-18 09:58:02 -08005976static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
Mel Gorman835c1342007-10-16 01:25:47 -07005977{
5978 unsigned long usemapsize;
5979
Linus Torvalds7c455122013-02-18 09:58:02 -08005980 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
Mel Gormand9c23402007-10-16 01:26:01 -07005981 usemapsize = roundup(zonesize, pageblock_nr_pages);
5982 usemapsize = usemapsize >> pageblock_order;
Mel Gorman835c1342007-10-16 01:25:47 -07005983 usemapsize *= NR_PAGEBLOCK_BITS;
5984 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
5985
5986 return usemapsize / 8;
5987}
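
/*
 * Worked example (illustrative, not from the original source): assuming
 * 4 KiB pages, pageblock_order == 9 (2 MiB pageblocks) and
 * NR_PAGEBLOCK_BITS == 4, a naturally aligned 1 GiB zone spans 262144
 * pages, i.e. 512 pageblocks.  That needs 512 * 4 = 2048 bits, which is
 * already a multiple of 64, so usemap_size() returns 2048 / 8 = 256 bytes
 * for the pageblock_flags bitmap.
 */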
5988
5989static void __init setup_usemap(struct pglist_data *pgdat,
Linus Torvalds7c455122013-02-18 09:58:02 -08005990 struct zone *zone,
5991 unsigned long zone_start_pfn,
5992 unsigned long zonesize)
Mel Gorman835c1342007-10-16 01:25:47 -07005993{
Linus Torvalds7c455122013-02-18 09:58:02 -08005994 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
Mel Gorman835c1342007-10-16 01:25:47 -07005995 zone->pageblock_flags = NULL;
Julia Lawall58a01a42009-01-06 14:39:28 -08005996 if (usemapsize)
Santosh Shilimkar67828322014-01-21 15:50:25 -08005997 zone->pageblock_flags =
5998 memblock_virt_alloc_node_nopanic(usemapsize,
5999 pgdat->node_id);
Mel Gorman835c1342007-10-16 01:25:47 -07006000}
6001#else
Linus Torvalds7c455122013-02-18 09:58:02 -08006002static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
6003 unsigned long zone_start_pfn, unsigned long zonesize) {}
Mel Gorman835c1342007-10-16 01:25:47 -07006004#endif /* CONFIG_SPARSEMEM */
6005
Mel Gormand9c23402007-10-16 01:26:01 -07006006#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
Mel Gormanba72cb82007-11-28 16:21:13 -08006007
Mel Gormand9c23402007-10-16 01:26:01 -07006008/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
Chen Gang15ca2202013-09-11 14:20:27 -07006009void __paginginit set_pageblock_order(void)
Mel Gormand9c23402007-10-16 01:26:01 -07006010{
Andrew Morton955c1cd2012-05-29 15:06:31 -07006011 unsigned int order;
6012
Mel Gormand9c23402007-10-16 01:26:01 -07006013 /* Check that pageblock_nr_pages has not already been setup */
6014 if (pageblock_order)
6015 return;
6016
Andrew Morton955c1cd2012-05-29 15:06:31 -07006017 if (HPAGE_SHIFT > PAGE_SHIFT)
6018 order = HUGETLB_PAGE_ORDER;
6019 else
6020 order = MAX_ORDER - 1;
6021
Mel Gormand9c23402007-10-16 01:26:01 -07006022 /*
6023 * Assume the largest contiguous order of interest is a huge page.
Andrew Morton955c1cd2012-05-29 15:06:31 -07006024 * This value may be variable depending on boot parameters on IA64 and
6025 * powerpc.
Mel Gormand9c23402007-10-16 01:26:01 -07006026 */
6027 pageblock_order = order;
6028}
6029#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6030
Mel Gormanba72cb82007-11-28 16:21:13 -08006031/*
6032 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
Andrew Morton955c1cd2012-05-29 15:06:31 -07006033 * is unused as pageblock_order is set at compile-time. See
6034 * include/linux/pageblock-flags.h for the values of pageblock_order based on
6035 * the kernel config
Mel Gormanba72cb82007-11-28 16:21:13 -08006036 */
Chen Gang15ca2202013-09-11 14:20:27 -07006037void __paginginit set_pageblock_order(void)
Mel Gormanba72cb82007-11-28 16:21:13 -08006038{
Mel Gormanba72cb82007-11-28 16:21:13 -08006039}
Mel Gormand9c23402007-10-16 01:26:01 -07006040
6041#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6042
Jiang Liu01cefae2012-12-12 13:52:19 -08006043static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
6044 unsigned long present_pages)
6045{
6046 unsigned long pages = spanned_pages;
6047
6048 /*
6049 * Provide a more accurate estimation if there are holes within
6050 * the zone and SPARSEMEM is in use. If there are holes within the
6051 * zone, each populated memory region may cost us one or two extra
6052 * memmap pages due to alignment because memmap pages for each
Masahiro Yamada89d790a2017-02-27 14:29:01 -08006053 * populated regions may not be naturally aligned on a page boundary.
Jiang Liu01cefae2012-12-12 13:52:19 -08006054 * So the (present_pages >> 4) heuristic is a tradeoff for that.
6055 */
6056 if (spanned_pages > present_pages + (present_pages >> 4) &&
6057 IS_ENABLED(CONFIG_SPARSEMEM))
6058 pages = present_pages;
6059
6060 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
6061}
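
/*
 * Worked example (illustrative, assuming 4 KiB pages and a 64-byte
 * struct page): a zone spanning 262144 pages (1 GiB) needs
 * 262144 * 64 bytes = 16 MiB of memmap, i.e. 4096 pages.  If a zone spans
 * 300000 pages but only 200000 are present, 300000 exceeds
 * 200000 + 200000/16 (= 212500), so with SPARSEMEM the estimate is based
 * on the 200000 present pages rather than the spanned range.
 */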
6062
Linus Torvalds1da177e2005-04-16 15:20:36 -07006063/*
6064 * Set up the zone data structures:
6065 * - mark all pages reserved
6066 * - mark all memory queues empty
6067 * - clear the memory bitmaps
Minchan Kim6527af52012-07-31 16:46:16 -07006068 *
6069 * NOTE: pgdat should get zeroed by caller.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006070 */
Wei Yang7f3eb552015-09-08 14:59:50 -07006071static void __paginginit free_area_init_core(struct pglist_data *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006072{
Christoph Lameter2f1b6242006-09-25 23:31:13 -07006073 enum zone_type j;
Dave Hansened8ece22005-10-29 18:16:50 -07006074 int nid = pgdat->node_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006075
Dave Hansen208d54e2005-10-29 18:16:52 -07006076 pgdat_resize_init(pgdat);
Andrea Arcangeli8177a422012-03-23 20:56:34 +01006077#ifdef CONFIG_NUMA_BALANCING
6078 spin_lock_init(&pgdat->numabalancing_migrate_lock);
6079 pgdat->numabalancing_migrate_nr_pages = 0;
6080 pgdat->numabalancing_migrate_next_window = jiffies;
6081#endif
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08006082#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6083 spin_lock_init(&pgdat->split_queue_lock);
6084 INIT_LIST_HEAD(&pgdat->split_queue);
6085 pgdat->split_queue_len = 0;
6086#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07006087 init_waitqueue_head(&pgdat->kswapd_wait);
Mel Gorman55150612012-07-31 16:44:35 -07006088 init_waitqueue_head(&pgdat->pfmemalloc_wait);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07006089#ifdef CONFIG_COMPACTION
6090 init_waitqueue_head(&pgdat->kcompactd_wait);
6091#endif
Joonsoo Kimeefa864b2014-12-12 16:55:46 -08006092 pgdat_page_ext_init(pgdat);
Mel Gormana52633d2016-07-28 15:45:28 -07006093 spin_lock_init(&pgdat->lru_lock);
Mel Gormana9dd0a82016-07-28 15:46:02 -07006094 lruvec_init(node_lruvec(pgdat));
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01006095
Johannes Weiner385386c2017-07-06 15:40:43 -07006096 pgdat->per_cpu_nodestats = &boot_nodestats;
6097
Linus Torvalds1da177e2005-04-16 15:20:36 -07006098 for (j = 0; j < MAX_NR_ZONES; j++) {
6099 struct zone *zone = pgdat->node_zones + j;
Jiang Liu9feedc92012-12-12 13:52:12 -08006100 unsigned long size, realsize, freesize, memmap_pages;
Taku Izumid91749c2016-03-15 14:55:18 -07006101 unsigned long zone_start_pfn = zone->zone_start_pfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006102
Gu Zhengfebd5942015-06-24 16:57:02 -07006103 size = zone->spanned_pages;
6104 realsize = freesize = zone->present_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006105
Mel Gorman0e0b8642006-09-27 01:49:56 -07006106 /*
Jiang Liu9feedc92012-12-12 13:52:12 -08006107 * Adjust freesize so that it accounts for how much memory
Mel Gorman0e0b8642006-09-27 01:49:56 -07006108 * is used by this zone for memmap. This affects the watermark
6109 * and per-cpu initialisations
6110 */
Jiang Liu01cefae2012-12-12 13:52:19 -08006111 memmap_pages = calc_memmap_size(size, realsize);
Zhong Hongboba914f42014-12-12 16:56:21 -08006112 if (!is_highmem_idx(j)) {
6113 if (freesize >= memmap_pages) {
6114 freesize -= memmap_pages;
6115 if (memmap_pages)
6116 printk(KERN_DEBUG
6117 " %s zone: %lu pages used for memmap\n",
6118 zone_names[j], memmap_pages);
6119 } else
Joe Perches11705322016-03-17 14:19:50 -07006120 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
Zhong Hongboba914f42014-12-12 16:56:21 -08006121 zone_names[j], memmap_pages, freesize);
6122 }
Mel Gorman0e0b8642006-09-27 01:49:56 -07006123
Christoph Lameter62672762007-02-10 01:43:07 -08006124 /* Account for reserved pages */
Jiang Liu9feedc92012-12-12 13:52:12 -08006125 if (j == 0 && freesize > dma_reserve) {
6126 freesize -= dma_reserve;
Yinghai Lud903ef92008-10-18 20:27:06 -07006127 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
Christoph Lameter62672762007-02-10 01:43:07 -08006128 zone_names[0], dma_reserve);
Mel Gorman0e0b8642006-09-27 01:49:56 -07006129 }
6130
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07006131 if (!is_highmem_idx(j))
Jiang Liu9feedc92012-12-12 13:52:12 -08006132 nr_kernel_pages += freesize;
Jiang Liu01cefae2012-12-12 13:52:19 -08006133 /* Charge for highmem memmap if there are enough kernel pages */
6134 else if (nr_kernel_pages > memmap_pages * 2)
6135 nr_kernel_pages -= memmap_pages;
Jiang Liu9feedc92012-12-12 13:52:12 -08006136 nr_all_pages += freesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006137
Jiang Liu9feedc92012-12-12 13:52:12 -08006138 /*
6139 * Set an approximate value for lowmem here, it will be adjusted
6140 * when the bootmem allocator frees pages into the buddy system.
6141 * And all highmem pages will be managed by the buddy system.
6142 */
6143 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
Christoph Lameter96146342006-07-03 00:24:13 -07006144#ifdef CONFIG_NUMA
Christoph Lameterd5f541e2006-09-27 01:50:08 -07006145 zone->node = nid;
Christoph Lameter96146342006-07-03 00:24:13 -07006146#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07006147 zone->name = zone_names[j];
Linus Torvalds1da177e2005-04-16 15:20:36 -07006148 zone->zone_pgdat = pgdat;
Mel Gormana52633d2016-07-28 15:45:28 -07006149 spin_lock_init(&zone->lock);
6150 zone_seqlock_init(zone);
Dave Hansened8ece22005-10-29 18:16:50 -07006151 zone_pcp_init(zone);
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07006152
Linus Torvalds1da177e2005-04-16 15:20:36 -07006153 if (!size)
6154 continue;
6155
Andrew Morton955c1cd2012-05-29 15:06:31 -07006156 set_pageblock_order();
Linus Torvalds7c455122013-02-18 09:58:02 -08006157 setup_usemap(pgdat, zone, zone_start_pfn, size);
Michal Hockodc0bbf32017-07-06 15:37:35 -07006158 init_currently_empty_zone(zone, zone_start_pfn, size);
Heiko Carstens76cdd582008-05-14 16:05:52 -07006159 memmap_init(size, nid, j, zone_start_pfn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006160 }
6161}
6162
Fabian Frederickbd721ea2016-08-02 14:03:33 -07006163static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006164{
Tony Luckb0aeba72015-11-10 10:09:47 -08006165 unsigned long __maybe_unused start = 0;
Laura Abbotta1c34a32015-11-05 18:48:46 -08006166 unsigned long __maybe_unused offset = 0;
6167
Linus Torvalds1da177e2005-04-16 15:20:36 -07006168 /* Skip empty nodes */
6169 if (!pgdat->node_spanned_pages)
6170 return;
6171
Andy Whitcroftd41dee32005-06-23 00:07:54 -07006172#ifdef CONFIG_FLAT_NODE_MEM_MAP
Tony Luckb0aeba72015-11-10 10:09:47 -08006173 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
6174 offset = pgdat->node_start_pfn - start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006175 /* ia64 gets its own node_mem_map, before this, without bootmem */
6176 if (!pgdat->node_mem_map) {
Tony Luckb0aeba72015-11-10 10:09:47 -08006177 unsigned long size, end;
Andy Whitcroftd41dee32005-06-23 00:07:54 -07006178 struct page *map;
6179
Bob Piccoe984bb42006-05-20 15:00:31 -07006180 /*
6181 * The zone's endpoints aren't required to be MAX_ORDER
6182 * aligned but the node_mem_map endpoints must be in order
6183 * for the buddy allocator to function correctly.
6184 */
Cody P Schafer108bcc92013-02-22 16:35:23 -08006185 end = pgdat_end_pfn(pgdat);
Bob Piccoe984bb42006-05-20 15:00:31 -07006186 end = ALIGN(end, MAX_ORDER_NR_PAGES);
6187 size = (end - start) * sizeof(struct page);
Dave Hansen6f167ec2005-06-23 00:07:39 -07006188 map = alloc_remap(pgdat->node_id, size);
6189 if (!map)
Santosh Shilimkar67828322014-01-21 15:50:25 -08006190 map = memblock_virt_alloc_node_nopanic(size,
6191 pgdat->node_id);
Laura Abbotta1c34a32015-11-05 18:48:46 -08006192 pgdat->node_mem_map = map + offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006193 }
Roman Zippel12d810c2007-05-31 00:40:54 -07006194#ifndef CONFIG_NEED_MULTIPLE_NODES
Linus Torvalds1da177e2005-04-16 15:20:36 -07006195 /*
6196 * With no DISCONTIG, the global mem_map is just set as node 0's
6197 */
Mel Gormanc7132162006-09-27 01:49:43 -07006198 if (pgdat == NODE_DATA(0)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006199 mem_map = NODE_DATA(0)->node_mem_map;
Laura Abbotta1c34a32015-11-05 18:48:46 -08006200#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
Mel Gormanc7132162006-09-27 01:49:43 -07006201 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
Laura Abbotta1c34a32015-11-05 18:48:46 -08006202 mem_map -= offset;
Tejun Heo0ee332c2011-12-08 10:22:09 -08006203#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07006204 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006205#endif
Andy Whitcroftd41dee32005-06-23 00:07:54 -07006206#endif /* CONFIG_FLAT_NODE_MEM_MAP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006207}
6208
Johannes Weiner9109fb72008-07-23 21:27:20 -07006209void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
6210 unsigned long node_start_pfn, unsigned long *zholes_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006211{
Johannes Weiner9109fb72008-07-23 21:27:20 -07006212 pg_data_t *pgdat = NODE_DATA(nid);
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006213 unsigned long start_pfn = 0;
6214 unsigned long end_pfn = 0;
Johannes Weiner9109fb72008-07-23 21:27:20 -07006215
Minchan Kim88fdf752012-07-31 16:46:14 -07006216 /* pg_data_t should be reset to zero when it's allocated */
Mel Gorman38087d92016-07-28 15:45:49 -07006217 WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
Minchan Kim88fdf752012-07-31 16:46:14 -07006218
Linus Torvalds1da177e2005-04-16 15:20:36 -07006219 pgdat->node_id = nid;
6220 pgdat->node_start_pfn = node_start_pfn;
Mel Gorman75ef7182016-07-28 15:45:24 -07006221 pgdat->per_cpu_nodestats = NULL;
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006222#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6223 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
Juergen Gross8d29e182015-02-11 15:26:01 -08006224 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
Zhen Lei4ada0c52015-09-08 15:04:19 -07006225 (u64)start_pfn << PAGE_SHIFT,
6226 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
Taku Izumid91749c2016-03-15 14:55:18 -07006227#else
6228 start_pfn = node_start_pfn;
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006229#endif
6230 calculate_node_totalpages(pgdat, start_pfn, end_pfn,
6231 zones_size, zholes_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006232
6233 alloc_node_mem_map(pgdat);
Yinghai Lue8c27ac2008-06-01 13:15:22 -07006234#ifdef CONFIG_FLAT_NODE_MEM_MAP
6235 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
6236 nid, (unsigned long)pgdat,
6237 (unsigned long)pgdat->node_mem_map);
6238#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07006239
Michal Hocko864b9a32017-06-02 14:46:49 -07006240 reset_deferred_meminit(pgdat);
Wei Yang7f3eb552015-09-08 14:59:50 -07006241 free_area_init_core(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006242}
6243
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006244#ifdef CONFIG_HAVE_MEMBLOCK
6245/*
6246 * Only struct pages that are backed by physical memory are zeroed and
6247 * initialized by going through __init_single_page(). But, there are some
6248 * struct pages which are reserved in the memblock allocator and their fields
6249 * may be accessed (for example, page_to_pfn() on some configurations accesses
6250 * flags). We must explicitly zero those struct pages.
6251 */
6252void __paginginit zero_resv_unavail(void)
6253{
6254 phys_addr_t start, end;
6255 unsigned long pfn;
6256 u64 i, pgcnt;
6257
6258 /*
6259 * Loop through ranges that are reserved, but do not have reported
6260 * physical memory backing.
6261 */
6262 pgcnt = 0;
6263 for_each_resv_unavail_range(i, &start, &end) {
6264 for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
6265 mm_zero_struct_page(pfn_to_page(pfn));
6266 pgcnt++;
6267 }
6268 }
6269
6270 /*
6271 * Struct pages that do not have backing memory. This could be because
6272 * firmware is using some of this memory, or for some other reason.
6273 * Once memblock is changed so that such behaviour is not allowed, i.e.
6274 * the list of "reserved" memory is a subset of the list of "memory",
6275 * this code can be removed.
6276 */
6277 if (pgcnt)
6278 pr_info("Reserved but unavailable: %lld pages\n", pgcnt);
6279}
6280#endif /* CONFIG_HAVE_MEMBLOCK */
6281
Tejun Heo0ee332c2011-12-08 10:22:09 -08006282#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Miklos Szeredi418508c2007-05-23 13:57:55 -07006283
6284#if MAX_NUMNODES > 1
6285/*
6286 * Figure out the number of possible node ids.
6287 */
Cody P Schaferf9872ca2013-04-29 15:08:01 -07006288void __init setup_nr_node_ids(void)
Miklos Szeredi418508c2007-05-23 13:57:55 -07006289{
Wei Yang904a9552015-09-08 14:59:48 -07006290 unsigned int highest;
Miklos Szeredi418508c2007-05-23 13:57:55 -07006291
Wei Yang904a9552015-09-08 14:59:48 -07006292 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
Miklos Szeredi418508c2007-05-23 13:57:55 -07006293 nr_node_ids = highest + 1;
6294}
Miklos Szeredi418508c2007-05-23 13:57:55 -07006295#endif
6296
Mel Gormanc7132162006-09-27 01:49:43 -07006297/**
Tejun Heo1e019792011-07-12 09:45:34 +02006298 * node_map_pfn_alignment - determine the maximum internode alignment
6299 *
6300 * This function should be called after node map is populated and sorted.
6301 * It calculates the maximum power of two alignment which can distinguish
6302 * all the nodes.
6303 *
6304 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
6305 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
6306 * nodes are shifted by 256MiB, the result is 256MiB. Note that if only the
6307 * last node is shifted, 1GiB is enough and this function will indicate so.
6308 *
6309 * This is used to test whether pfn -> nid mapping of the chosen memory
6310 * model has fine enough granularity to avoid incorrect mapping for the
6311 * populated node map.
6312 *
6313 * Returns the determined alignment in pfn's. 0 if there is no alignment
6314 * requirement (single node).
6315 */
6316unsigned long __init node_map_pfn_alignment(void)
6317{
6318 unsigned long accl_mask = 0, last_end = 0;
Tejun Heoc13291a2011-07-12 10:46:30 +02006319 unsigned long start, end, mask;
Tejun Heo1e019792011-07-12 09:45:34 +02006320 int last_nid = -1;
Tejun Heoc13291a2011-07-12 10:46:30 +02006321 int i, nid;
Tejun Heo1e019792011-07-12 09:45:34 +02006322
Tejun Heoc13291a2011-07-12 10:46:30 +02006323 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
Tejun Heo1e019792011-07-12 09:45:34 +02006324 if (!start || last_nid < 0 || last_nid == nid) {
6325 last_nid = nid;
6326 last_end = end;
6327 continue;
6328 }
6329
6330 /*
6331 * Start with a mask granular enough to pin-point to the
6332 * start pfn and tick off bits one-by-one until it becomes
6333 * too coarse to separate the current node from the last.
6334 */
6335 mask = ~((1 << __ffs(start)) - 1);
6336 while (mask && last_end <= (start & (mask << 1)))
6337 mask <<= 1;
6338
6339 /* accumulate all internode masks */
6340 accl_mask |= mask;
6341 }
6342
6343 /* convert mask to number of pages */
6344 return ~accl_mask + 1;
6345}
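
/*
 * Worked example (illustrative, 4 KiB pages): two nodes covering
 * [0, 1 GiB) and [1 GiB, 2 GiB).  When node 1 is reached, start is
 * pfn 0x40000, so the initial mask is ~((1 << 18) - 1).  Because node 0
 * ends exactly at pfn 0x40000 and 0x40000 & (mask << 1) is 0, the loop
 * never widens the mask, and the function returns ~accl_mask + 1,
 * i.e. 1 << 18 pfns: the 1 GiB alignment mentioned in the comment above.
 */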
6346
Mel Gormana6af2bc2007-02-10 01:42:57 -08006347/* Find the lowest pfn for a node */
Adrian Bunkb69a7282008-07-23 21:28:12 -07006348static unsigned long __init find_min_pfn_for_node(int nid)
Mel Gormanc7132162006-09-27 01:49:43 -07006349{
Mel Gormana6af2bc2007-02-10 01:42:57 -08006350 unsigned long min_pfn = ULONG_MAX;
Tejun Heoc13291a2011-07-12 10:46:30 +02006351 unsigned long start_pfn;
6352 int i;
Mel Gorman1abbfb42006-11-23 12:01:41 +00006353
Tejun Heoc13291a2011-07-12 10:46:30 +02006354 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
6355 min_pfn = min(min_pfn, start_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07006356
Mel Gormana6af2bc2007-02-10 01:42:57 -08006357 if (min_pfn == ULONG_MAX) {
Joe Perches11705322016-03-17 14:19:50 -07006358 pr_warn("Could not find start_pfn for node %d\n", nid);
Mel Gormana6af2bc2007-02-10 01:42:57 -08006359 return 0;
6360 }
6361
6362 return min_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07006363}
6364
6365/**
6366 * find_min_pfn_with_active_regions - Find the minimum PFN registered
6367 *
6368 * It returns the minimum PFN based on information provided via
Zhang Zhen7d018172014-06-04 16:10:53 -07006369 * memblock_set_node().
Mel Gormanc7132162006-09-27 01:49:43 -07006370 */
6371unsigned long __init find_min_pfn_with_active_regions(void)
6372{
6373 return find_min_pfn_for_node(MAX_NUMNODES);
6374}
6375
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006376/*
6377 * early_calculate_totalpages()
6378 * Sum pages in active regions for movable zone.
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006379 * Populate N_MEMORY for calculating usable_nodes.
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006380 */
Adrian Bunk484f51f2007-10-16 01:26:03 -07006381static unsigned long __init early_calculate_totalpages(void)
Mel Gorman7e63efef2007-07-17 04:03:15 -07006382{
Mel Gorman7e63efef2007-07-17 04:03:15 -07006383 unsigned long totalpages = 0;
Tejun Heoc13291a2011-07-12 10:46:30 +02006384 unsigned long start_pfn, end_pfn;
6385 int i, nid;
Mel Gorman7e63efef2007-07-17 04:03:15 -07006386
Tejun Heoc13291a2011-07-12 10:46:30 +02006387 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6388 unsigned long pages = end_pfn - start_pfn;
6389
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006390 totalpages += pages;
6391 if (pages)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006392 node_set_state(nid, N_MEMORY);
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006393 }
Pintu Kumarb8af2942013-09-11 14:20:34 -07006394 return totalpages;
Mel Gorman7e63efef2007-07-17 04:03:15 -07006395}
6396
Mel Gorman2a1e2742007-07-17 04:03:12 -07006397/*
6398 * Find the PFN the Movable zone begins in each node. Kernel memory
6399 * is spread evenly between nodes as long as the nodes have enough
6400 * memory. When they don't, some nodes will have more kernelcore than
6401 * others
6402 */
Kautuk Consulb224ef82012-03-21 16:34:15 -07006403static void __init find_zone_movable_pfns_for_nodes(void)
Mel Gorman2a1e2742007-07-17 04:03:12 -07006404{
6405 int i, nid;
6406 unsigned long usable_startpfn;
6407 unsigned long kernelcore_node, kernelcore_remaining;
Yinghai Lu66918dc2009-06-30 11:41:37 -07006408 /* save the state before borrow the nodemask */
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006409 nodemask_t saved_node_state = node_states[N_MEMORY];
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006410 unsigned long totalpages = early_calculate_totalpages();
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006411 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
Emil Medve136199f2014-04-07 15:37:52 -07006412 struct memblock_region *r;
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006413
6414 /* Need to find movable_zone earlier when movable_node is specified. */
6415 find_usable_zone_for_movable();
Mel Gorman2a1e2742007-07-17 04:03:12 -07006416
Mel Gorman7e63efef2007-07-17 04:03:15 -07006417 /*
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006418 * If movable_node is specified, ignore kernelcore and movablecore
6419 * options.
6420 */
6421 if (movable_node_is_enabled()) {
Emil Medve136199f2014-04-07 15:37:52 -07006422 for_each_memblock(memory, r) {
6423 if (!memblock_is_hotpluggable(r))
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006424 continue;
6425
Emil Medve136199f2014-04-07 15:37:52 -07006426 nid = r->nid;
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006427
Emil Medve136199f2014-04-07 15:37:52 -07006428 usable_startpfn = PFN_DOWN(r->base);
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006429 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6430 min(usable_startpfn, zone_movable_pfn[nid]) :
6431 usable_startpfn;
6432 }
6433
6434 goto out2;
6435 }
6436
6437 /*
Taku Izumi342332e2016-03-15 14:55:22 -07006438 * If kernelcore=mirror is specified, ignore movablecore option
6439 */
6440 if (mirrored_kernelcore) {
6441 bool mem_below_4gb_not_mirrored = false;
6442
6443 for_each_memblock(memory, r) {
6444 if (memblock_is_mirror(r))
6445 continue;
6446
6447 nid = r->nid;
6448
6449 usable_startpfn = memblock_region_memory_base_pfn(r);
6450
6451 if (usable_startpfn < 0x100000) {
6452 mem_below_4gb_not_mirrored = true;
6453 continue;
6454 }
6455
6456 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6457 min(usable_startpfn, zone_movable_pfn[nid]) :
6458 usable_startpfn;
6459 }
6460
6461 if (mem_below_4gb_not_mirrored)
6462 pr_warn("This configuration results in unmirrored kernel memory.\n");
6463
6464 goto out2;
6465 }
6466
6467 /*
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006468 * If movablecore=nn[KMG] was specified, calculate the corresponding size
Mel Gorman7e63efef2007-07-17 04:03:15 -07006469 * of kernelcore so that memory usable for
6470 * any allocation type is evenly spread. If both kernelcore
6471 * and movablecore are specified, then the value of kernelcore
6472 * will be used for required_kernelcore if it's greater than
6473 * what movablecore would have allowed.
6474 */
6475 if (required_movablecore) {
Mel Gorman7e63efef2007-07-17 04:03:15 -07006476 unsigned long corepages;
6477
6478 /*
6479 * Round-up so that ZONE_MOVABLE is at least as large as what
6480 * was requested by the user
6481 */
6482 required_movablecore =
6483 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
Xishi Qiu9fd745d2015-11-05 18:48:11 -08006484 required_movablecore = min(totalpages, required_movablecore);
Mel Gorman7e63efef2007-07-17 04:03:15 -07006485 corepages = totalpages - required_movablecore;
6486
6487 required_kernelcore = max(required_kernelcore, corepages);
6488 }
6489
Xishi Qiubde304b2015-11-05 18:48:56 -08006490 /*
6491 * If kernelcore was not specified or kernelcore size is larger
6492 * than totalpages, there is no ZONE_MOVABLE.
6493 */
6494 if (!required_kernelcore || required_kernelcore >= totalpages)
Yinghai Lu66918dc2009-06-30 11:41:37 -07006495 goto out;
Mel Gorman2a1e2742007-07-17 04:03:12 -07006496
6497 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
Mel Gorman2a1e2742007-07-17 04:03:12 -07006498 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
6499
6500restart:
6501 /* Spread kernelcore memory as evenly as possible throughout nodes */
6502 kernelcore_node = required_kernelcore / usable_nodes;
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006503 for_each_node_state(nid, N_MEMORY) {
Tejun Heoc13291a2011-07-12 10:46:30 +02006504 unsigned long start_pfn, end_pfn;
6505
Mel Gorman2a1e2742007-07-17 04:03:12 -07006506 /*
6507 * Recalculate kernelcore_node if the division per node
6508 * now exceeds what is necessary to satisfy the requested
6509 * amount of memory for the kernel
6510 */
6511 if (required_kernelcore < kernelcore_node)
6512 kernelcore_node = required_kernelcore / usable_nodes;
6513
6514 /*
6515 * As the map is walked, we track how much memory is usable
6516 * by the kernel using kernelcore_remaining. When it is
6517 * 0, the rest of the node is usable by ZONE_MOVABLE
6518 */
6519 kernelcore_remaining = kernelcore_node;
6520
6521 /* Go through each range of PFNs within this node */
Tejun Heoc13291a2011-07-12 10:46:30 +02006522 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
Mel Gorman2a1e2742007-07-17 04:03:12 -07006523 unsigned long size_pages;
6524
Tejun Heoc13291a2011-07-12 10:46:30 +02006525 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
Mel Gorman2a1e2742007-07-17 04:03:12 -07006526 if (start_pfn >= end_pfn)
6527 continue;
6528
6529 /* Account for what is only usable for kernelcore */
6530 if (start_pfn < usable_startpfn) {
6531 unsigned long kernel_pages;
6532 kernel_pages = min(end_pfn, usable_startpfn)
6533 - start_pfn;
6534
6535 kernelcore_remaining -= min(kernel_pages,
6536 kernelcore_remaining);
6537 required_kernelcore -= min(kernel_pages,
6538 required_kernelcore);
6539
6540 /* Continue if range is now fully accounted */
6541 if (end_pfn <= usable_startpfn) {
6542
6543 /*
6544 * Push zone_movable_pfn to the end so
6545 * that if we have to rebalance
6546 * kernelcore across nodes, we will
6547 * not double account here
6548 */
6549 zone_movable_pfn[nid] = end_pfn;
6550 continue;
6551 }
6552 start_pfn = usable_startpfn;
6553 }
6554
6555 /*
6556 * The usable PFN range for ZONE_MOVABLE is from
6557 * start_pfn->end_pfn. Calculate size_pages as the
6558 * number of pages used as kernelcore
6559 */
6560 size_pages = end_pfn - start_pfn;
6561 if (size_pages > kernelcore_remaining)
6562 size_pages = kernelcore_remaining;
6563 zone_movable_pfn[nid] = start_pfn + size_pages;
6564
6565 /*
6566 * Some kernelcore has been met, update counts and
6567 * break if the kernelcore for this node has been
Pintu Kumarb8af2942013-09-11 14:20:34 -07006568 * satisfied
Mel Gorman2a1e2742007-07-17 04:03:12 -07006569 */
6570 required_kernelcore -= min(required_kernelcore,
6571 size_pages);
6572 kernelcore_remaining -= size_pages;
6573 if (!kernelcore_remaining)
6574 break;
6575 }
6576 }
6577
6578 /*
6579 * If there is still required_kernelcore, we do another pass with one
6580 * less node in the count. This will push zone_movable_pfn[nid] further
6581 * along on the nodes that still have memory until kernelcore is
Pintu Kumarb8af2942013-09-11 14:20:34 -07006582 * satisfied
Mel Gorman2a1e2742007-07-17 04:03:12 -07006583 */
6584 usable_nodes--;
6585 if (usable_nodes && required_kernelcore > usable_nodes)
6586 goto restart;
6587
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006588out2:
Mel Gorman2a1e2742007-07-17 04:03:12 -07006589 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
6590 for (nid = 0; nid < MAX_NUMNODES; nid++)
6591 zone_movable_pfn[nid] =
6592 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
Yinghai Lu66918dc2009-06-30 11:41:37 -07006593
Yinghai Lu20e69262013-03-01 14:51:27 -08006594out:
Yinghai Lu66918dc2009-06-30 11:41:37 -07006595 /* restore the node_state */
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006596 node_states[N_MEMORY] = saved_node_state;
Mel Gorman2a1e2742007-07-17 04:03:12 -07006597}
6598
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006599/* Any regular or high memory on that node? */
6600static void check_for_memory(pg_data_t *pgdat, int nid)
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006601{
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006602 enum zone_type zone_type;
6603
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006604 if (N_MEMORY == N_NORMAL_MEMORY)
6605 return;
6606
6607 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006608 struct zone *zone = &pgdat->node_zones[zone_type];
Xishi Qiub38a8722013-11-12 15:07:20 -08006609 if (populated_zone(zone)) {
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006610 node_set_state(nid, N_HIGH_MEMORY);
6611 if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
6612 zone_type <= ZONE_NORMAL)
6613 node_set_state(nid, N_NORMAL_MEMORY);
Bob Liud0048b02012-01-12 17:19:07 -08006614 break;
6615 }
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006616 }
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006617}
6618
Mel Gormanc7132162006-09-27 01:49:43 -07006619/**
6620 * free_area_init_nodes - Initialise all pg_data_t and zone data
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006621 * @max_zone_pfn: an array of max PFNs for each zone
Mel Gormanc7132162006-09-27 01:49:43 -07006622 *
6623 * This will call free_area_init_node() for each active node in the system.
Zhang Zhen7d018172014-06-04 16:10:53 -07006624 * Using the page ranges provided by memblock_set_node(), the size of each
Mel Gormanc7132162006-09-27 01:49:43 -07006625 * zone in each node and their holes is calculated. If the maximum PFN
6626 * between two adjacent zones match, it is assumed that the zone is empty.
6627 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
6628 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
6629 * starts where the previous one ended. For example, ZONE_DMA32 starts
6630 * at arch_max_dma_pfn.
6631 */
6632void __init free_area_init_nodes(unsigned long *max_zone_pfn)
6633{
Tejun Heoc13291a2011-07-12 10:46:30 +02006634 unsigned long start_pfn, end_pfn;
6635 int i, nid;
Mel Gormana6af2bc2007-02-10 01:42:57 -08006636
Mel Gormanc7132162006-09-27 01:49:43 -07006637 /* Record where the zone boundaries are */
6638 memset(arch_zone_lowest_possible_pfn, 0,
6639 sizeof(arch_zone_lowest_possible_pfn));
6640 memset(arch_zone_highest_possible_pfn, 0,
6641 sizeof(arch_zone_highest_possible_pfn));
Oliver O'Halloran90cae1f2016-07-26 15:22:17 -07006642
6643 start_pfn = find_min_pfn_with_active_regions();
6644
6645 for (i = 0; i < MAX_NR_ZONES; i++) {
Mel Gorman2a1e2742007-07-17 04:03:12 -07006646 if (i == ZONE_MOVABLE)
6647 continue;
Oliver O'Halloran90cae1f2016-07-26 15:22:17 -07006648
6649 end_pfn = max(max_zone_pfn[i], start_pfn);
6650 arch_zone_lowest_possible_pfn[i] = start_pfn;
6651 arch_zone_highest_possible_pfn[i] = end_pfn;
6652
6653 start_pfn = end_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07006654 }
Mel Gorman2a1e2742007-07-17 04:03:12 -07006655
6656 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
6657 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
Kautuk Consulb224ef82012-03-21 16:34:15 -07006658 find_zone_movable_pfns_for_nodes();
Mel Gormanc7132162006-09-27 01:49:43 -07006659
Mel Gormanc7132162006-09-27 01:49:43 -07006660 /* Print out the zone ranges */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006661 pr_info("Zone ranges:\n");
Mel Gorman2a1e2742007-07-17 04:03:12 -07006662 for (i = 0; i < MAX_NR_ZONES; i++) {
6663 if (i == ZONE_MOVABLE)
6664 continue;
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006665 pr_info(" %-8s ", zone_names[i]);
David Rientjes72f0ba02010-03-05 13:42:14 -08006666 if (arch_zone_lowest_possible_pfn[i] ==
6667 arch_zone_highest_possible_pfn[i])
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006668 pr_cont("empty\n");
David Rientjes72f0ba02010-03-05 13:42:14 -08006669 else
Juergen Gross8d29e182015-02-11 15:26:01 -08006670 pr_cont("[mem %#018Lx-%#018Lx]\n",
6671 (u64)arch_zone_lowest_possible_pfn[i]
6672 << PAGE_SHIFT,
6673 ((u64)arch_zone_highest_possible_pfn[i]
Bjorn Helgaasa62e2f42012-05-29 15:06:30 -07006674 << PAGE_SHIFT) - 1);
Mel Gorman2a1e2742007-07-17 04:03:12 -07006675 }
6676
6677 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006678 pr_info("Movable zone start for each node\n");
Mel Gorman2a1e2742007-07-17 04:03:12 -07006679 for (i = 0; i < MAX_NUMNODES; i++) {
6680 if (zone_movable_pfn[i])
Juergen Gross8d29e182015-02-11 15:26:01 -08006681 pr_info(" Node %d: %#018Lx\n", i,
6682 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
Mel Gorman2a1e2742007-07-17 04:03:12 -07006683 }
Mel Gormanc7132162006-09-27 01:49:43 -07006684
Wanpeng Lif2d52fe2012-10-08 16:32:24 -07006685 /* Print out the early node map */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006686 pr_info("Early memory node ranges\n");
Tejun Heoc13291a2011-07-12 10:46:30 +02006687 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
Juergen Gross8d29e182015-02-11 15:26:01 -08006688 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
6689 (u64)start_pfn << PAGE_SHIFT,
6690 ((u64)end_pfn << PAGE_SHIFT) - 1);
Mel Gormanc7132162006-09-27 01:49:43 -07006691
6692 /* Initialise every node */
Mel Gorman708614e2008-07-23 21:26:51 -07006693 mminit_verify_pageflags_layout();
Christoph Lameter8ef82862007-02-20 13:57:52 -08006694 setup_nr_node_ids();
Mel Gormanc7132162006-09-27 01:49:43 -07006695 for_each_online_node(nid) {
6696 pg_data_t *pgdat = NODE_DATA(nid);
Johannes Weiner9109fb72008-07-23 21:27:20 -07006697 free_area_init_node(nid, NULL,
Mel Gormanc7132162006-09-27 01:49:43 -07006698 find_min_pfn_for_node(nid), NULL);
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006699
6700 /* Any memory on that node */
6701 if (pgdat->node_present_pages)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006702 node_set_state(nid, N_MEMORY);
6703 check_for_memory(pgdat, nid);
Mel Gormanc7132162006-09-27 01:49:43 -07006704 }
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006705 zero_resv_unavail();
Mel Gormanc7132162006-09-27 01:49:43 -07006706}
Mel Gorman2a1e2742007-07-17 04:03:12 -07006707
Mel Gorman7e63efef2007-07-17 04:03:15 -07006708static int __init cmdline_parse_core(char *p, unsigned long *core)
Mel Gorman2a1e2742007-07-17 04:03:12 -07006709{
6710 unsigned long long coremem;
6711 if (!p)
6712 return -EINVAL;
6713
6714 coremem = memparse(p, &p);
Mel Gorman7e63efef2007-07-17 04:03:15 -07006715 *core = coremem >> PAGE_SHIFT;
Mel Gorman2a1e2742007-07-17 04:03:12 -07006716
Mel Gorman7e63efef2007-07-17 04:03:15 -07006717 /* Paranoid check that UL is enough for the coremem value */
Mel Gorman2a1e2742007-07-17 04:03:12 -07006718 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
6719
6720 return 0;
6721}
Mel Gormaned7ed362007-07-17 04:03:14 -07006722
Mel Gorman7e63efef2007-07-17 04:03:15 -07006723/*
6724 * kernelcore=size sets the amount of memory for use for allocations that
6725 * cannot be reclaimed or migrated.
6726 */
6727static int __init cmdline_parse_kernelcore(char *p)
6728{
Taku Izumi342332e2016-03-15 14:55:22 -07006729 /* parse kernelcore=mirror */
6730 if (parse_option_str(p, "mirror")) {
6731 mirrored_kernelcore = true;
6732 return 0;
6733 }
6734
Mel Gorman7e63efef2007-07-17 04:03:15 -07006735 return cmdline_parse_core(p, &required_kernelcore);
6736}
6737
6738/*
6739 * movablecore=size sets the amount of memory for use for allocations that
6740 * can be reclaimed or migrated.
6741 */
6742static int __init cmdline_parse_movablecore(char *p)
6743{
6744 return cmdline_parse_core(p, &required_movablecore);
6745}
6746
Mel Gormaned7ed362007-07-17 04:03:14 -07006747early_param("kernelcore", cmdline_parse_kernelcore);
Mel Gorman7e63efef2007-07-17 04:03:15 -07006748early_param("movablecore", cmdline_parse_movablecore);
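
/*
 * Example usage (illustrative): the two options above are boot parameters
 * parsed with memparse(), so K/M/G suffixes are accepted, e.g.
 *
 *	kernelcore=512M		keep 512 MiB usable for unmovable (kernel)
 *				allocations; the remainder becomes ZONE_MOVABLE
 *	movablecore=2G		size ZONE_MOVABLE so roughly 2 GiB is usable
 *				only for movable allocations
 *	kernelcore=mirror	place kernel memory only in mirrored regions
 */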
Mel Gormaned7ed362007-07-17 04:03:14 -07006749
Tejun Heo0ee332c2011-12-08 10:22:09 -08006750#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07006751
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07006752void adjust_managed_page_count(struct page *page, long count)
6753{
6754 spin_lock(&managed_page_count_lock);
6755 page_zone(page)->managed_pages += count;
6756 totalram_pages += count;
Jiang Liu3dcc0572013-07-03 15:03:21 -07006757#ifdef CONFIG_HIGHMEM
6758 if (PageHighMem(page))
6759 totalhigh_pages += count;
6760#endif
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07006761 spin_unlock(&managed_page_count_lock);
6762}
Jiang Liu3dcc0572013-07-03 15:03:21 -07006763EXPORT_SYMBOL(adjust_managed_page_count);
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07006764
Jiang Liu11199692013-07-03 15:02:48 -07006765unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
Jiang Liu69afade2013-04-29 15:06:21 -07006766{
Jiang Liu11199692013-07-03 15:02:48 -07006767 void *pos;
6768 unsigned long pages = 0;
Jiang Liu69afade2013-04-29 15:06:21 -07006769
Jiang Liu11199692013-07-03 15:02:48 -07006770 start = (void *)PAGE_ALIGN((unsigned long)start);
6771 end = (void *)((unsigned long)end & PAGE_MASK);
6772 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
Jiang Liudbe67df2013-07-03 15:02:51 -07006773 if ((unsigned int)poison <= 0xFF)
Jiang Liu11199692013-07-03 15:02:48 -07006774 memset(pos, poison, PAGE_SIZE);
6775 free_reserved_page(virt_to_page(pos));
Jiang Liu69afade2013-04-29 15:06:21 -07006776 }
6777
6778 if (pages && s)
Josh Poimboeufadb1fe92016-10-25 09:51:14 -05006779 pr_info("Freeing %s memory: %ldK\n",
6780 s, pages << (PAGE_SHIFT - 10));
Jiang Liu69afade2013-04-29 15:06:21 -07006781
6782 return pages;
6783}
Jiang Liu11199692013-07-03 15:02:48 -07006784EXPORT_SYMBOL(free_reserved_area);
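
/*
 * Usage sketch (illustrative): callers typically hand back an __init
 * region once it is no longer needed, e.g.
 *
 *	free_reserved_area(&__init_begin, &__init_end, -1, "unused kernel");
 *
 * A negative poison value is cast to an unsigned int larger than 0xFF,
 * so the pages are released without being overwritten first.
 */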
Jiang Liu69afade2013-04-29 15:06:21 -07006785
Jiang Liucfa11e02013-04-29 15:07:00 -07006786#ifdef CONFIG_HIGHMEM
6787void free_highmem_page(struct page *page)
6788{
6789 __free_reserved_page(page);
6790 totalram_pages++;
Jiang Liu7b4b2a02013-07-03 15:03:11 -07006791 page_zone(page)->managed_pages++;
Jiang Liucfa11e02013-04-29 15:07:00 -07006792 totalhigh_pages++;
6793}
6794#endif
6795
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006796
6797void __init mem_init_print_info(const char *str)
6798{
6799 unsigned long physpages, codesize, datasize, rosize, bss_size;
6800 unsigned long init_code_size, init_data_size;
6801
6802 physpages = get_num_physpages();
6803 codesize = _etext - _stext;
6804 datasize = _edata - _sdata;
6805 rosize = __end_rodata - __start_rodata;
6806 bss_size = __bss_stop - __bss_start;
6807 init_data_size = __init_end - __init_begin;
6808 init_code_size = _einittext - _sinittext;
6809
6810 /*
6811 * Detect special cases and adjust section sizes accordingly:
6812 * 1) .init.* may be embedded into .data sections
6813 * 2) .init.text.* may be out of [__init_begin, __init_end],
6814 * please refer to arch/tile/kernel/vmlinux.lds.S.
6815 * 3) .rodata.* may be embedded into .text or .data sections.
6816 */
6817#define adj_init_size(start, end, size, pos, adj) \
Pintu Kumarb8af2942013-09-11 14:20:34 -07006818 do { \
6819 if (start <= pos && pos < end && size > adj) \
6820 size -= adj; \
6821 } while (0)
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006822
6823 adj_init_size(__init_begin, __init_end, init_data_size,
6824 _sinittext, init_code_size);
6825 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
6826 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
6827 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
6828 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
6829
6830#undef adj_init_size
6831
Joe Perches756a0252016-03-17 14:19:47 -07006832 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006833#ifdef CONFIG_HIGHMEM
Joe Perches756a0252016-03-17 14:19:47 -07006834 ", %luK highmem"
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006835#endif
Joe Perches756a0252016-03-17 14:19:47 -07006836 "%s%s)\n",
6837 nr_free_pages() << (PAGE_SHIFT - 10),
6838 physpages << (PAGE_SHIFT - 10),
6839 codesize >> 10, datasize >> 10, rosize >> 10,
6840 (init_data_size + init_code_size) >> 10, bss_size >> 10,
6841 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
6842 totalcma_pages << (PAGE_SHIFT - 10),
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006843#ifdef CONFIG_HIGHMEM
Joe Perches756a0252016-03-17 14:19:47 -07006844 totalhigh_pages << (PAGE_SHIFT - 10),
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006845#endif
Joe Perches756a0252016-03-17 14:19:47 -07006846 str ? ", " : "", str ? str : "");
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006847}
6848
Mel Gorman0e0b8642006-09-27 01:49:56 -07006849/**
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006850 * set_dma_reserve - set the specified number of pages reserved in the first zone
6851 * @new_dma_reserve: The number of pages to mark reserved
Mel Gorman0e0b8642006-09-27 01:49:56 -07006852 *
Yaowei Bai013110a2015-09-08 15:04:10 -07006853 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
Mel Gorman0e0b8642006-09-27 01:49:56 -07006854 * In the DMA zone, a significant percentage may be consumed by kernel image
6855 * and other unfreeable allocations which can skew the watermarks badly. This
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006856 * function may optionally be used to account for unfreeable pages in the
6857 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
6858 * smaller per-cpu batchsize.
Mel Gorman0e0b8642006-09-27 01:49:56 -07006859 */
6860void __init set_dma_reserve(unsigned long new_dma_reserve)
6861{
6862 dma_reserve = new_dma_reserve;
6863}
6864
Linus Torvalds1da177e2005-04-16 15:20:36 -07006865void __init free_area_init(unsigned long *zones_size)
6866{
Johannes Weiner9109fb72008-07-23 21:27:20 -07006867 free_area_init_node(0, zones_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006868 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
Pavel Tatashina4a3ede2017-11-15 17:36:31 -08006869 zero_resv_unavail();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006870}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006871
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01006872static int page_alloc_cpu_dead(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006873{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006874
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01006875 lru_add_drain_cpu(cpu);
6876 drain_pages(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08006877
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01006878 /*
6879 * Spill the event counters of the dead processor
6880 * into the current processors event counters.
6881 * This artificially elevates the count of the current
6882 * processor.
6883 */
6884 vm_events_fold_cpu(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08006885
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01006886 /*
6887 * Zero the differential counters of the dead processor
6888 * so that the vm statistics are consistent.
6889 *
6890 * This is only okay since the processor is dead and cannot
6891 * race with what we are doing.
6892 */
6893 cpu_vm_stats_fold(cpu);
6894 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006895}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006896
6897void __init page_alloc_init(void)
6898{
Sebastian Andrzej Siewior005fd4b2016-11-03 15:50:02 +01006899 int ret;
6900
6901 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
6902 "mm/page_alloc:dead", NULL,
6903 page_alloc_cpu_dead);
6904 WARN_ON(ret < 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006905}
6906
6907/*
Yaowei Bai34b10062015-09-08 15:04:13 -07006908 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006909 * or min_free_kbytes changes.
6910 */
6911static void calculate_totalreserve_pages(void)
6912{
6913 struct pglist_data *pgdat;
6914 unsigned long reserve_pages = 0;
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006915 enum zone_type i, j;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006916
6917 for_each_online_pgdat(pgdat) {
Mel Gorman281e3722016-07-28 15:46:11 -07006918
6919 pgdat->totalreserve_pages = 0;
6920
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006921 for (i = 0; i < MAX_NR_ZONES; i++) {
6922 struct zone *zone = pgdat->node_zones + i;
Mel Gorman3484b2d2014-08-06 16:07:14 -07006923 long max = 0;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006924
6925 /* Find valid and maximum lowmem_reserve in the zone */
6926 for (j = i; j < MAX_NR_ZONES; j++) {
6927 if (zone->lowmem_reserve[j] > max)
6928 max = zone->lowmem_reserve[j];
6929 }
6930
Mel Gorman41858962009-06-16 15:32:12 -07006931 /* we treat the high watermark as reserved pages. */
6932 max += high_wmark_pages(zone);
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006933
Jiang Liub40da042013-02-22 16:33:52 -08006934 if (max > zone->managed_pages)
6935 max = zone->managed_pages;
Johannes Weinera8d01432016-01-14 15:20:15 -08006936
Mel Gorman281e3722016-07-28 15:46:11 -07006937 pgdat->totalreserve_pages += max;
Johannes Weinera8d01432016-01-14 15:20:15 -08006938
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006939 reserve_pages += max;
6940 }
6941 }
6942 totalreserve_pages = reserve_pages;
6943}
6944
6945/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07006946 * setup_per_zone_lowmem_reserve - called whenever
Yaowei Bai34b10062015-09-08 15:04:13 -07006947 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
Linus Torvalds1da177e2005-04-16 15:20:36 -07006948 * has a correct pages reserved value, so an adequate number of
6949 * pages are left in the zone after a successful __alloc_pages().
6950 */
6951static void setup_per_zone_lowmem_reserve(void)
6952{
6953 struct pglist_data *pgdat;
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006954 enum zone_type j, idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006955
KAMEZAWA Hiroyukiec936fc2006-03-27 01:15:59 -08006956 for_each_online_pgdat(pgdat) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006957 for (j = 0; j < MAX_NR_ZONES; j++) {
6958 struct zone *zone = pgdat->node_zones + j;
Jiang Liub40da042013-02-22 16:33:52 -08006959 unsigned long managed_pages = zone->managed_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006960
6961 zone->lowmem_reserve[j] = 0;
6962
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006963 idx = j;
6964 while (idx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006965 struct zone *lower_zone;
6966
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006967 idx--;
6968
Linus Torvalds1da177e2005-04-16 15:20:36 -07006969 if (sysctl_lowmem_reserve_ratio[idx] < 1)
6970 sysctl_lowmem_reserve_ratio[idx] = 1;
6971
6972 lower_zone = pgdat->node_zones + idx;
Jiang Liub40da042013-02-22 16:33:52 -08006973 lower_zone->lowmem_reserve[j] = managed_pages /
Linus Torvalds1da177e2005-04-16 15:20:36 -07006974 sysctl_lowmem_reserve_ratio[idx];
Jiang Liub40da042013-02-22 16:33:52 -08006975 managed_pages += lower_zone->managed_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006976 }
6977 }
6978 }
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006979
6980 /* update totalreserve_pages */
6981 calculate_totalreserve_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006982}
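
/*
 * Worked example (illustrative): with the default
 * sysctl_lowmem_reserve_ratio[ZONE_DMA] == 256 and roughly 4 GiB of
 * managed memory in the zones above ZONE_DMA, the DMA zone ends up with a
 * lowmem_reserve entry of about 4 GiB / 256 = 16 MiB worth of pages for
 * ZONE_NORMAL allocations.  A GFP_KERNEL allocation therefore only falls
 * back to ZONE_DMA while the DMA zone has at least that many free pages
 * above its watermark.
 */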
6983
Mel Gormancfd3da12011-04-25 21:36:42 +00006984static void __setup_per_zone_wmarks(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006985{
6986 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6987 unsigned long lowmem_pages = 0;
6988 struct zone *zone;
6989 unsigned long flags;
6990
6991 /* Calculate total number of !ZONE_HIGHMEM pages */
6992 for_each_zone(zone) {
6993 if (!is_highmem(zone))
Jiang Liub40da042013-02-22 16:33:52 -08006994 lowmem_pages += zone->managed_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006995 }
6996
6997 for_each_zone(zone) {
Andrew Mortonac924c62006-05-15 09:43:59 -07006998 u64 tmp;
6999
Gerald Schaefer1125b4e2008-10-18 20:27:11 -07007000 spin_lock_irqsave(&zone->lock, flags);
Jiang Liub40da042013-02-22 16:33:52 -08007001 tmp = (u64)pages_min * zone->managed_pages;
Andrew Mortonac924c62006-05-15 09:43:59 -07007002 do_div(tmp, lowmem_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007003 if (is_highmem(zone)) {
7004 /*
Nick Piggin669ed172005-11-13 16:06:45 -08007005 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
7006 * need highmem pages, so cap pages_min to a small
7007 * value here.
7008 *
Mel Gorman41858962009-06-16 15:32:12 -07007009 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
Yaowei Bai42ff2702015-04-14 15:47:14 -07007010 * deltas control async page reclaim, and so should
Nick Piggin669ed172005-11-13 16:06:45 -08007011 * not be capped for highmem.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007012 */
Andrew Morton90ae8d62013-02-22 16:32:22 -08007013 unsigned long min_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007014
Jiang Liub40da042013-02-22 16:33:52 -08007015 min_pages = zone->managed_pages / 1024;
Andrew Morton90ae8d62013-02-22 16:32:22 -08007016 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
Mel Gorman41858962009-06-16 15:32:12 -07007017 zone->watermark[WMARK_MIN] = min_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007018 } else {
Nick Piggin669ed172005-11-13 16:06:45 -08007019 /*
7020 * If it's a lowmem zone, reserve a number of pages
Linus Torvalds1da177e2005-04-16 15:20:36 -07007021 * proportionate to the zone's size.
7022 */
Mel Gorman41858962009-06-16 15:32:12 -07007023 zone->watermark[WMARK_MIN] = tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007024 }
7025
Johannes Weiner795ae7a2016-03-17 14:19:14 -07007026 /*
7027 * Set the kswapd watermarks distance according to the
7028 * scale factor in proportion to available memory, but
7029 * ensure a minimum size on small systems.
7030 */
7031 tmp = max_t(u64, tmp >> 2,
7032 mult_frac(zone->managed_pages,
7033 watermark_scale_factor, 10000));
7034
7035 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
7036 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
Marek Szyprowski49f223a2012-01-25 12:49:24 +01007037
Gerald Schaefer1125b4e2008-10-18 20:27:11 -07007038 spin_unlock_irqrestore(&zone->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007039 }
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07007040
7041 /* update totalreserve_pages */
7042 calculate_totalreserve_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007043}
7044
Mel Gormancfd3da12011-04-25 21:36:42 +00007045/**
7046 * setup_per_zone_wmarks - called when min_free_kbytes changes
7047 * or when memory is hot-{added|removed}
7048 *
7049 * Ensures that the watermark[min,low,high] values for each zone are set
7050 * correctly with respect to min_free_kbytes.
7051 */
7052void setup_per_zone_wmarks(void)
7053{
Michal Hockob93e0f32017-09-06 16:20:37 -07007054 static DEFINE_SPINLOCK(lock);
7055
7056 spin_lock(&lock);
Mel Gormancfd3da12011-04-25 21:36:42 +00007057 __setup_per_zone_wmarks();
Michal Hockob93e0f32017-09-06 16:20:37 -07007058 spin_unlock(&lock);
Mel Gormancfd3da12011-04-25 21:36:42 +00007059}
7060
Randy Dunlap55a44622009-09-21 17:01:20 -07007061/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07007062 * Initialise min_free_kbytes.
7063 *
7064 * For small machines we want it small (128k min). For large machines
7065 * we want it large (64MB max). But it is not linear, because network
7066 * bandwidth does not increase linearly with machine size. We use
7067 *
Pintu Kumarb8af2942013-09-11 14:20:34 -07007068 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007069 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
7070 *
7071 * which yields
7072 *
7073 * 16MB: 512k
7074 * 32MB: 724k
7075 * 64MB: 1024k
7076 * 128MB: 1448k
7077 * 256MB: 2048k
7078 * 512MB: 2896k
7079 * 1024MB: 4096k
7080 * 2048MB: 5792k
7081 * 4096MB: 8192k
7082 * 8192MB: 11584k
7083 * 16384MB: 16384k
7084 */
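/*
 * Worked example of the formula above (illustrative, no extra behaviour):
 * a machine with 4096MB of lowmem has lowmem_kbytes = 4194304, so
 * int_sqrt(4194304 * 16) = int_sqrt(67108864) = 8192k, matching the table;
 * the result is then clamped to the [128, 65536] range below.
 */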
KOSAKI Motohiro1b79acc2011-05-24 17:11:32 -07007085int __meminit init_per_zone_wmark_min(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007086{
7087 unsigned long lowmem_kbytes;
Michal Hocko5f127332013-07-08 16:00:40 -07007088 int new_min_free_kbytes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007089
7090 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
Michal Hocko5f127332013-07-08 16:00:40 -07007091 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007092
Michal Hocko5f127332013-07-08 16:00:40 -07007093 if (new_min_free_kbytes > user_min_free_kbytes) {
7094 min_free_kbytes = new_min_free_kbytes;
7095 if (min_free_kbytes < 128)
7096 min_free_kbytes = 128;
7097 if (min_free_kbytes > 65536)
7098 min_free_kbytes = 65536;
7099 } else {
7100 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
7101 new_min_free_kbytes, user_min_free_kbytes);
7102 }
Minchan Kimbc75d332009-06-16 15:32:48 -07007103 setup_per_zone_wmarks();
KOSAKI Motohiroa6cccdc2011-05-24 17:11:33 -07007104 refresh_zone_stat_thresholds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007105 setup_per_zone_lowmem_reserve();
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007106
7107#ifdef CONFIG_NUMA
7108 setup_min_unmapped_ratio();
7109 setup_min_slab_ratio();
7110#endif
7111
Linus Torvalds1da177e2005-04-16 15:20:36 -07007112 return 0;
7113}
Jason Baronbc22af742016-05-05 16:22:12 -07007114core_initcall(init_per_zone_wmark_min)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007115
7116/*
Pintu Kumarb8af2942013-09-11 14:20:34 -07007117 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec_minmax()
Linus Torvalds1da177e2005-04-16 15:20:36 -07007118 * so that we can recompute the per-zone watermarks whenever min_free_kbytes
7119 * changes.
7120 */
Joe Perchescccad5b2014-06-06 14:38:09 -07007121int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007122 void __user *buffer, size_t *length, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007123{
Han Pingtianda8c7572014-01-23 15:53:17 -08007124 int rc;
7125
7126 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7127 if (rc)
7128 return rc;
7129
Michal Hocko5f127332013-07-08 16:00:40 -07007130 if (write) {
7131 user_min_free_kbytes = min_free_kbytes;
Minchan Kimbc75d332009-06-16 15:32:48 -07007132 setup_per_zone_wmarks();
Michal Hocko5f127332013-07-08 16:00:40 -07007133 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007134 return 0;
7135}
7136
Johannes Weiner795ae7a2016-03-17 14:19:14 -07007137int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
7138 void __user *buffer, size_t *length, loff_t *ppos)
7139{
7140 int rc;
7141
7142 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7143 if (rc)
7144 return rc;
7145
7146 if (write)
7147 setup_per_zone_wmarks();
7148
7149 return 0;
7150}
7151
Christoph Lameter96146342006-07-03 00:24:13 -07007152#ifdef CONFIG_NUMA
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007153static void setup_min_unmapped_ratio(void)
Christoph Lameter96146342006-07-03 00:24:13 -07007154{
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007155 pg_data_t *pgdat;
Christoph Lameter96146342006-07-03 00:24:13 -07007156 struct zone *zone;
Christoph Lameter96146342006-07-03 00:24:13 -07007157
Mel Gormana5f5f912016-07-28 15:46:32 -07007158 for_each_online_pgdat(pgdat)
Joonsoo Kim81cbcbc2016-08-10 16:27:46 -07007159 pgdat->min_unmapped_pages = 0;
Mel Gormana5f5f912016-07-28 15:46:32 -07007160
Christoph Lameter96146342006-07-03 00:24:13 -07007161 for_each_zone(zone)
Mel Gormana5f5f912016-07-28 15:46:32 -07007162 zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
Christoph Lameter96146342006-07-03 00:24:13 -07007163 sysctl_min_unmapped_ratio) / 100;
Christoph Lameter96146342006-07-03 00:24:13 -07007164}
Christoph Lameter0ff38492006-09-25 23:31:52 -07007165
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007166
7167int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007168 void __user *buffer, size_t *length, loff_t *ppos)
Christoph Lameter0ff38492006-09-25 23:31:52 -07007169{
Christoph Lameter0ff38492006-09-25 23:31:52 -07007170 int rc;
7171
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007172 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
Christoph Lameter0ff38492006-09-25 23:31:52 -07007173 if (rc)
7174 return rc;
7175
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007176 setup_min_unmapped_ratio();
7177
7178 return 0;
7179}
7180
7181static void setup_min_slab_ratio(void)
7182{
7183 pg_data_t *pgdat;
7184 struct zone *zone;
7185
Mel Gormana5f5f912016-07-28 15:46:32 -07007186 for_each_online_pgdat(pgdat)
7187 pgdat->min_slab_pages = 0;
7188
Christoph Lameter0ff38492006-09-25 23:31:52 -07007189 for_each_zone(zone)
Mel Gormana5f5f912016-07-28 15:46:32 -07007190 zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
Christoph Lameter0ff38492006-09-25 23:31:52 -07007191 sysctl_min_slab_ratio) / 100;
Joonsoo Kim6423aa82016-08-10 16:27:49 -07007192}
7193
7194int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
7195 void __user *buffer, size_t *length, loff_t *ppos)
7196{
7197 int rc;
7198
7199 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7200 if (rc)
7201 return rc;
7202
7203 setup_min_slab_ratio();
7204
Christoph Lameter0ff38492006-09-25 23:31:52 -07007205 return 0;
7206}
Christoph Lameter96146342006-07-03 00:24:13 -07007207#endif
7208
Linus Torvalds1da177e2005-04-16 15:20:36 -07007209/*
7210 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
7211 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
7212 * whenever sysctl_lowmem_reserve_ratio changes.
7213 *
7214 * The reserve ratio has no relation to the minimum watermarks. The
Mel Gorman41858962009-06-16 15:32:12 -07007215 * lowmem reserve ratio is only meaningful in relation to the boot-time
Linus Torvalds1da177e2005-04-16 15:20:36 -07007216 * zone sizes.
7217 */
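/*
 * Illustrative numbers (not from the kernel source): with
 * sysctl_lowmem_reserve_ratio[ZONE_DMA] = 256 (its usual default) and 4GB
 * (1048576 4K pages) of memory in the zones above it,
 * setup_per_zone_lowmem_reserve() makes the DMA zone keep
 * 1048576 / 256 = 4096 pages (16MB) off-limits to allocations that could
 * have been placed in the higher zones instead.
 */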
Joe Perchescccad5b2014-06-06 14:38:09 -07007218int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007219 void __user *buffer, size_t *length, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007220{
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007221 proc_dointvec_minmax(table, write, buffer, length, ppos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007222 setup_per_zone_lowmem_reserve();
7223 return 0;
7224}
7225
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08007226/*
7227 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
Pintu Kumarb8af2942013-09-11 14:20:34 -07007228 * cpu. It is the fraction of each zone's total pages that a hot per-cpu
7229 * pagelist can hold before it gets flushed back to the buddy allocator.
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08007230 */
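/*
 * Illustrative numbers (not from the kernel source): writing 8 (the lowest
 * accepted value, MIN_PERCPU_PAGELIST_FRACTION) for a zone with 1,048,576
 * managed pages sets pcp->high to 1048576 / 8 = 131072 pages per CPU for
 * that zone; writing 0 reverts to the default batch-based sizing.
 */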
Joe Perchescccad5b2014-06-06 14:38:09 -07007231int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007232 void __user *buffer, size_t *length, loff_t *ppos)
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08007233{
7234 struct zone *zone;
David Rientjes7cd2b0a2014-06-23 13:22:04 -07007235 int old_percpu_pagelist_fraction;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08007236 int ret;
7237
Cody P Schaferc8e251f2013-07-03 15:01:29 -07007238 mutex_lock(&pcp_batch_high_lock);
David Rientjes7cd2b0a2014-06-23 13:22:04 -07007239 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
7240
7241 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
7242 if (!write || ret < 0)
7243 goto out;
7244
7245 /* Sanity checking to avoid pcp imbalance */
7246 if (percpu_pagelist_fraction &&
7247 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
7248 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
7249 ret = -EINVAL;
7250 goto out;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08007251 }
David Rientjes7cd2b0a2014-06-23 13:22:04 -07007252
7253 /* No change? */
7254 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
7255 goto out;
7256
7257 for_each_populated_zone(zone) {
7258 unsigned int cpu;
7259
7260 for_each_possible_cpu(cpu)
7261 pageset_set_high_and_batch(zone,
7262 per_cpu_ptr(zone->pageset, cpu));
7263 }
7264out:
Cody P Schaferc8e251f2013-07-03 15:01:29 -07007265 mutex_unlock(&pcp_batch_high_lock);
David Rientjes7cd2b0a2014-06-23 13:22:04 -07007266 return ret;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08007267}
7268
Rasmus Villemoesa9919c72015-06-24 16:56:28 -07007269#ifdef CONFIG_NUMA
David S. Millerf034b5d2006-08-24 03:08:07 -07007270int hashdist = HASHDIST_DEFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007271
Linus Torvalds1da177e2005-04-16 15:20:36 -07007272static int __init set_hashdist(char *str)
7273{
7274 if (!str)
7275 return 0;
7276 hashdist = simple_strtoul(str, &str, 0);
7277 return 1;
7278}
7279__setup("hashdist=", set_hashdist);
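/*
 * Example (boot command line): "hashdist=0" makes alloc_large_system_hash()
 * below use the page allocator instead of vmalloc() even on NUMA machines,
 * keeping each table node-local and physically contiguous; the default
 * (HASHDIST_DEFAULT) typically allows vmalloc() on 64-bit NUMA kernels so
 * the tables can be spread across nodes.
 */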
7280#endif
7281
Srikar Dronamrajuf6f34b42016-10-07 16:59:15 -07007282#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
7283/*
7284 * Returns the number of pages that arch has reserved but
7285 * is not known to alloc_large_system_hash().
7286 */
7287static unsigned long __init arch_reserved_kernel_pages(void)
7288{
7289 return 0;
7290}
7291#endif
7292
Linus Torvalds1da177e2005-04-16 15:20:36 -07007293/*
Pavel Tatashin90172172017-07-06 15:39:14 -07007294 * Adaptive scale is meant to reduce sizes of hash tables on large memory
7295 * machines. As memory size is increased the scale is also increased but at
7296 * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory
7297 * quadruples the scale is increased by one, which means the size of hash table
7298 * only doubles, instead of quadrupling as well.
7299 * Because 32-bit systems cannot have the large physical memory where this
7300 * scaling makes sense, it is disabled on such platforms.
7301 */
7302#if __BITS_PER_LONG > 32
7303#define ADAPT_SCALE_BASE (64ul << 30)
7304#define ADAPT_SCALE_SHIFT 2
7305#define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT)
7306#endif
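/*
 * Illustrative numbers (not from the kernel source): on a 512GB machine the
 * adaptive loop in alloc_large_system_hash() bumps @scale twice (once past
 * 64GB and once past 256GB), so an auto-sized table comes out 4 times
 * smaller than plain linear scaling from a 64GB machine would produce.
 */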
7307
7308/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07007309 * allocate a large system hash table from bootmem
7310 * - it is assumed that the hash table must contain an exact power-of-2
7311 * quantity of entries
7312 * - limit is the number of hash buckets, not the total allocation size
7313 */
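/*
 * Illustrative call (hypothetical caller and names; mirrors how boot-time
 * users such as the inode and dentry hash tables size themselves):
 *
 *	table = alloc_large_system_hash("my-cache",
 *			sizeof(struct hlist_head),
 *			0,			(auto-size from memory)
 *			14,			(one bucket per 16KB of low memory)
 *			HASH_EARLY | HASH_ZERO,
 *			&my_hash_shift, &my_hash_mask,
 *			0, 0);			(no explicit low/high limits)
 */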
7314void *__init alloc_large_system_hash(const char *tablename,
7315 unsigned long bucketsize,
7316 unsigned long numentries,
7317 int scale,
7318 int flags,
7319 unsigned int *_hash_shift,
7320 unsigned int *_hash_mask,
Tim Bird31fe62b2012-05-23 13:33:35 +00007321 unsigned long low_limit,
7322 unsigned long high_limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007323{
Tim Bird31fe62b2012-05-23 13:33:35 +00007324 unsigned long long max = high_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007325 unsigned long log2qty, size;
7326 void *table = NULL;
Pavel Tatashin3749a8f2017-07-06 15:39:08 -07007327 gfp_t gfp_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007328
7329 /* allow the kernel cmdline to have a say */
7330 if (!numentries) {
7331 /* round applicable memory size up to nearest megabyte */
Andrew Morton04903662006-12-06 20:37:33 -08007332 numentries = nr_kernel_pages;
Srikar Dronamrajuf6f34b42016-10-07 16:59:15 -07007333 numentries -= arch_reserved_kernel_pages();
Jerry Zhoua7e83312013-09-11 14:20:26 -07007334
7335 /* It isn't necessary when PAGE_SIZE >= 1MB */
7336 if (PAGE_SHIFT < 20)
7337 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007338
Pavel Tatashin90172172017-07-06 15:39:14 -07007339#if __BITS_PER_LONG > 32
7340 if (!high_limit) {
7341 unsigned long adapt;
7342
7343 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
7344 adapt <<= ADAPT_SCALE_SHIFT)
7345 scale++;
7346 }
7347#endif
7348
Linus Torvalds1da177e2005-04-16 15:20:36 -07007349 /* limit to 1 bucket per 2^scale bytes of low memory */
7350 if (scale > PAGE_SHIFT)
7351 numentries >>= (scale - PAGE_SHIFT);
7352 else
7353 numentries <<= (PAGE_SHIFT - scale);
Paul Mundt9ab37b82007-01-05 16:36:30 -08007354
7355 /* Make sure we've got at least a 0-order allocation.. */
Jan Beulich2c85f512009-09-21 17:03:07 -07007356 if (unlikely(flags & HASH_SMALL)) {
7357 /* Makes no sense without HASH_EARLY */
7358 WARN_ON(!(flags & HASH_EARLY));
7359 if (!(numentries >> *_hash_shift)) {
7360 numentries = 1UL << *_hash_shift;
7361 BUG_ON(!numentries);
7362 }
7363 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
Paul Mundt9ab37b82007-01-05 16:36:30 -08007364 numentries = PAGE_SIZE / bucketsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007365 }
John Hawkes6e692ed2006-03-25 03:08:02 -08007366 numentries = roundup_pow_of_two(numentries);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007367
7368 /* limit allocation size to 1/16 total memory by default */
7369 if (max == 0) {
7370 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
7371 do_div(max, bucketsize);
7372 }
Dimitri Sivanich074b8512012-02-08 12:39:07 -08007373 max = min(max, 0x80000000ULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007374
Tim Bird31fe62b2012-05-23 13:33:35 +00007375 if (numentries < low_limit)
7376 numentries = low_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007377 if (numentries > max)
7378 numentries = max;
7379
David Howellsf0d1b0b2006-12-08 02:37:49 -08007380 log2qty = ilog2(numentries);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007381
Pavel Tatashin3749a8f2017-07-06 15:39:08 -07007382 gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007383 do {
7384 size = bucketsize << log2qty;
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08007385 if (flags & HASH_EARLY) {
7386 if (flags & HASH_ZERO)
7387 table = memblock_virt_alloc_nopanic(size, 0);
7388 else
7389 table = memblock_virt_alloc_raw(size, 0);
7390 } else if (hashdist) {
Pavel Tatashin3749a8f2017-07-06 15:39:08 -07007391 table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08007392 } else {
Eric Dumazet1037b832007-07-15 23:38:05 -07007393 /*
7394			 * If bucketsize is not a power of two, we may end up freeing
Mel Gormana1dd2682009-06-16 15:32:19 -07007395			 * some pages at the end of the hash table, which
7396			 * alloc_pages_exact() does automatically.
Eric Dumazet1037b832007-07-15 23:38:05 -07007397 */
Catalin Marinas264ef8a2009-07-07 10:33:01 +01007398 if (get_order(size) < MAX_ORDER) {
Pavel Tatashin3749a8f2017-07-06 15:39:08 -07007399 table = alloc_pages_exact(size, gfp_flags);
7400 kmemleak_alloc(table, size, 1, gfp_flags);
Catalin Marinas264ef8a2009-07-07 10:33:01 +01007401 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007402 }
7403 } while (!table && size > PAGE_SIZE && --log2qty);
7404
7405 if (!table)
7406 panic("Failed to allocate %s hash table\n", tablename);
7407
Joe Perches11705322016-03-17 14:19:50 -07007408 pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
7409 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007410
7411 if (_hash_shift)
7412 *_hash_shift = log2qty;
7413 if (_hash_mask)
7414 *_hash_mask = (1 << log2qty) - 1;
7415
7416 return table;
7417}
KAMEZAWA Hiroyukia117e662006-03-27 01:15:25 -08007418
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07007419/*
Minchan Kim80934512012-07-31 16:43:01 -07007420 * This function checks whether the pageblock includes unmovable pages or not.
7421 * If @count is not zero, up to @count unmovable pages in the block are tolerated.
7422 *
Pintu Kumarb8af2942013-09-11 14:20:34 -07007423 * A PageLRU check without isolation or the lru_lock can race, so a
Yisheng Xie0efadf42017-02-24 14:57:39 -08007424 * MIGRATE_MOVABLE block might still include unmovable pages. Likewise, a
7425 * __PageMovable check without lock_page may miss some movable non-LRU
7426 * pages under a race. So this function cannot be expected to be exact.
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07007427 */
Wen Congyangb023f462012-12-11 16:00:45 -08007428bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
Michal Hocko4da2ce22017-11-15 17:33:26 -08007429 int migratetype,
Wen Congyangb023f462012-12-11 16:00:45 -08007430 bool skip_hwpoisoned_pages)
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007431{
7432 unsigned long pfn, iter, found;
Michal Nazarewicz47118af2011-12-29 13:09:50 +01007433
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007434 /*
7435	 * To avoid noise, lru_add_drain_all() should have been called first.
Minchan Kim80934512012-07-31 16:43:01 -07007436	 * A ZONE_MOVABLE zone, by definition, never contains unmovable pages.
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007437 */
7438 if (zone_idx(zone) == ZONE_MOVABLE)
Minchan Kim80934512012-07-31 16:43:01 -07007439 return false;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007440
Michal Hocko4da2ce22017-11-15 17:33:26 -08007441 /*
7442 * CMA allocations (alloc_contig_range) really need to mark isolate
7443 * CMA pageblocks even when they are not movable in fact so consider
7444 * them movable here.
7445 */
7446 if (is_migrate_cma(migratetype) &&
7447 is_migrate_cma(get_pageblock_migratetype(page)))
7448 return false;
7449
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007450 pfn = page_to_pfn(page);
7451 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
7452 unsigned long check = pfn + iter;
7453
Namhyung Kim29723fc2011-02-25 14:44:25 -08007454 if (!pfn_valid_within(check))
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007455 continue;
Namhyung Kim29723fc2011-02-25 14:44:25 -08007456
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007457 page = pfn_to_page(check);
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07007458
Michal Hockod7ab3672017-11-15 17:33:30 -08007459 if (PageReserved(page))
7460 return true;
7461
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07007462 /*
7463 * Hugepages are not in LRU lists, but they're movable.
7464		 * We need not scan over tail pages because we don't
7465 * handle each tail page individually in migration.
7466 */
7467 if (PageHuge(page)) {
7468 iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
7469 continue;
7470 }
7471
Minchan Kim97d255c2012-07-31 16:42:59 -07007472 /*
7473		 * We can't use page_count without pinning the page
7474		 * because another CPU can free the compound page.
7475		 * This check already skips compound tails of THP
Joonsoo Kim0139aa72016-05-19 17:10:49 -07007476		 * because their page->_refcount is zero at all times.
Minchan Kim97d255c2012-07-31 16:42:59 -07007477 */
Joonsoo Kimfe896d12016-03-17 14:19:26 -07007478 if (!page_ref_count(page)) {
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007479 if (PageBuddy(page))
7480 iter += (1 << page_order(page)) - 1;
7481 continue;
7482 }
Minchan Kim97d255c2012-07-31 16:42:59 -07007483
Wen Congyangb023f462012-12-11 16:00:45 -08007484 /*
7485		 * The HWPoisoned page may not be in the buddy system, and
7486 * page_count() is not 0.
7487 */
7488 if (skip_hwpoisoned_pages && PageHWPoison(page))
7489 continue;
7490
Yisheng Xie0efadf42017-02-24 14:57:39 -08007491 if (__PageMovable(page))
7492 continue;
7493
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007494 if (!PageLRU(page))
7495 found++;
7496 /*
Johannes Weiner6b4f7792014-12-12 16:56:13 -08007497		 * If there are RECLAIMABLE pages, we need to check
7498		 * them. But for now, memory offline itself doesn't call
7499		 * shrink_node_slabs(), and this still needs to be fixed.
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007500 */
7501 /*
7502		 * If the page is not RAM, page_count() should be 0.
7503		 * We don't need any further check. This is a _used_ non-movable page.
7504		 *
7505		 * The problematic thing here is PG_reserved pages. PG_reserved
7506		 * is set on both memory hole pages and _used_ kernel
7507		 * pages at boot.
7508 */
7509 if (found > count)
Minchan Kim80934512012-07-31 16:43:01 -07007510 return true;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007511 }
Minchan Kim80934512012-07-31 16:43:01 -07007512 return false;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007513}
7514
7515bool is_pageblock_removable_nolock(struct page *page)
7516{
Michal Hocko656a0702012-01-20 14:33:58 -08007517 struct zone *zone;
7518 unsigned long pfn;
Michal Hocko687875fb2012-01-20 14:33:55 -08007519
7520 /*
7521 * We have to be careful here because we are iterating over memory
7522 * sections which are not zone aware so we might end up outside of
7523 * the zone but still within the section.
Michal Hocko656a0702012-01-20 14:33:58 -08007524	 * We also have to take care of the node. If the node is offline
7525 * its NODE_DATA will be NULL - see page_zone.
Michal Hocko687875fb2012-01-20 14:33:55 -08007526 */
Michal Hocko656a0702012-01-20 14:33:58 -08007527 if (!node_online(page_to_nid(page)))
7528 return false;
7529
7530 zone = page_zone(page);
7531 pfn = page_to_pfn(page);
Cody P Schafer108bcc92013-02-22 16:35:23 -08007532 if (!zone_spans_pfn(zone, pfn))
Michal Hocko687875fb2012-01-20 14:33:55 -08007533 return false;
7534
Michal Hocko4da2ce22017-11-15 17:33:26 -08007535 return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, true);
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07007536}
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007537
Vlastimil Babka080fe202016-02-05 15:36:41 -08007538#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007539
7540static unsigned long pfn_max_align_down(unsigned long pfn)
7541{
7542 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
7543 pageblock_nr_pages) - 1);
7544}
7545
7546static unsigned long pfn_max_align_up(unsigned long pfn)
7547{
7548 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
7549 pageblock_nr_pages));
7550}
7551
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007552/* [start, end) must belong to a single zone. */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007553static int __alloc_contig_migrate_range(struct compact_control *cc,
7554 unsigned long start, unsigned long end)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007555{
7556 /* This function is based on compact_zone() from compaction.c. */
Minchan Kimbeb51ea2012-10-08 16:33:51 -07007557 unsigned long nr_reclaimed;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007558 unsigned long pfn = start;
7559 unsigned int tries = 0;
7560 int ret = 0;
7561
Marek Szyprowskibe49a6e2012-12-12 13:51:19 -08007562 migrate_prep();
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007563
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007564 while (pfn < end || !list_empty(&cc->migratepages)) {
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007565 if (fatal_signal_pending(current)) {
7566 ret = -EINTR;
7567 break;
7568 }
7569
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007570 if (list_empty(&cc->migratepages)) {
7571 cc->nr_migratepages = 0;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07007572 pfn = isolate_migratepages_range(cc, pfn, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007573 if (!pfn) {
7574 ret = -EINTR;
7575 break;
7576 }
7577 tries = 0;
7578 } else if (++tries == 5) {
7579 ret = ret < 0 ? ret : -EBUSY;
7580 break;
7581 }
7582
Minchan Kimbeb51ea2012-10-08 16:33:51 -07007583 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
7584 &cc->migratepages);
7585 cc->nr_migratepages -= nr_reclaimed;
Minchan Kim02c6de82012-10-08 16:31:55 -07007586
Hugh Dickins9c620e22013-02-22 16:35:14 -08007587 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
David Rientjese0b9dae2014-06-04 16:08:28 -07007588 NULL, 0, cc->mode, MR_CMA);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007589 }
Srinivas Pandruvada2a6f5122013-02-22 16:32:09 -08007590 if (ret < 0) {
7591 putback_movable_pages(&cc->migratepages);
7592 return ret;
7593 }
7594 return 0;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007595}
7596
7597/**
7598 * alloc_contig_range() -- tries to allocate given range of pages
7599 * @start: start PFN to allocate
7600 * @end: one-past-the-last PFN to allocate
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02007601 * @migratetype:	migratetype of the underlying pageblocks (either
7602 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
7603 * in range must have the same migratetype and it must
7604 * be either of the two.
Lucas Stachca96b622017-02-24 14:58:37 -08007605 * @gfp_mask: GFP mask to use during compaction
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007606 *
7607 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
7608 * aligned, however it's the caller's responsibility to guarantee that
7609 * we are the only thread that changes migrate type of pageblocks the
7610 * pages fall in.
7611 *
7612 * The PFN range must belong to a single zone.
7613 *
7614 * Returns zero on success or negative error code. On success all
7615 * pages whose PFN is in [start, end) are allocated for the caller and
7616 * need to be freed with free_contig_range().
7617 */
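/*
 * Illustrative usage (hypothetical CMA-style caller, not part of this file):
 *
 *	if (!alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA, GFP_KERNEL)) {
 *		... use pfn_to_page(pfn) .. pfn_to_page(pfn + nr_pages - 1) ...
 *		free_contig_range(pfn, nr_pages);
 *	}
 *
 * The caller must already own the MIGRATE_CMA (or MIGRATE_MOVABLE)
 * pageblocks that [pfn, pfn + nr_pages) falls in, as noted above.
 */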
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02007618int alloc_contig_range(unsigned long start, unsigned long end,
Lucas Stachca96b622017-02-24 14:58:37 -08007619 unsigned migratetype, gfp_t gfp_mask)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007620{
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007621 unsigned long outer_start, outer_end;
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08007622 unsigned int order;
7623 int ret = 0;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007624
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007625 struct compact_control cc = {
7626 .nr_migratepages = 0,
7627 .order = -1,
7628 .zone = page_zone(pfn_to_page(start)),
David Rientjese0b9dae2014-06-04 16:08:28 -07007629 .mode = MIGRATE_SYNC,
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007630 .ignore_skip_hint = true,
Michal Hocko7dea19f2017-05-03 14:53:15 -07007631 .gfp_mask = current_gfp_context(gfp_mask),
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007632 };
7633 INIT_LIST_HEAD(&cc.migratepages);
7634
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007635 /*
7636 * What we do here is we mark all pageblocks in range as
7637 * MIGRATE_ISOLATE. Because pageblock and max order pages may
7638	 * have different sizes, and due to the way the page allocator
7639	 * works, we align the range to the bigger of the two sizes so
7640	 * that the page allocator won't try to merge buddies from
7641 * different pageblocks and change MIGRATE_ISOLATE to some
7642 * other migration type.
7643 *
7644 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
7645 * migrate the pages from an unaligned range (ie. pages that
7646 * we are interested in). This will put all the pages in
7647 * range back to page allocator as MIGRATE_ISOLATE.
7648 *
7649 * When this is done, we take the pages in range from page
7650 * allocator removing them from the buddy system. This way
7651 * page allocator will never consider using them.
7652 *
7653 * This lets us mark the pageblocks back as
7654 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
7655 * aligned range but not in the unaligned, original range are
7656 * put back to page allocator so that buddy can use them.
7657 */
7658
7659 ret = start_isolate_page_range(pfn_max_align_down(start),
Wen Congyangb023f462012-12-11 16:00:45 -08007660 pfn_max_align_up(end), migratetype,
7661 false);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007662 if (ret)
Bob Liu86a595f2012-10-25 13:37:56 -07007663 return ret;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007664
Joonsoo Kim8ef58492016-01-14 15:18:45 -08007665 /*
7666 * In case of -EBUSY, we'd like to know which page causes problem.
7667 * So, just fall through. We will check it in test_pages_isolated().
7668 */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007669 ret = __alloc_contig_migrate_range(&cc, start, end);
Joonsoo Kim8ef58492016-01-14 15:18:45 -08007670 if (ret && ret != -EBUSY)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007671 goto done;
7672
7673 /*
7674 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
7675 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
7676 * more, all pages in [start, end) are free in page allocator.
7677 * What we are going to do is to allocate all pages from
7678 * [start, end) (that is remove them from page allocator).
7679 *
7680 * The only problem is that pages at the beginning and at the
7681 * end of interesting range may be not aligned with pages that
7682 * page allocator holds, ie. they can be part of higher order
7683 * pages. Because of this, we reserve the bigger range and
7684 * once this is done free the pages we are not interested in.
7685 *
7686 * We don't have to hold zone->lock here because the pages are
7687 * isolated thus they won't get removed from buddy.
7688 */
7689
7690 lru_add_drain_all();
Vlastimil Babka510f5502014-12-10 15:43:07 -08007691 drain_all_pages(cc.zone);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007692
7693 order = 0;
7694 outer_start = start;
7695 while (!PageBuddy(pfn_to_page(outer_start))) {
7696 if (++order >= MAX_ORDER) {
Joonsoo Kim8ef58492016-01-14 15:18:45 -08007697 outer_start = start;
7698 break;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007699 }
7700 outer_start &= ~0UL << order;
7701 }
7702
Joonsoo Kim8ef58492016-01-14 15:18:45 -08007703 if (outer_start != start) {
7704 order = page_order(pfn_to_page(outer_start));
7705
7706 /*
7707 * outer_start page could be small order buddy page and
7708 * it doesn't include start page. Adjust outer_start
7709 * in this case to report failed page properly
7710 * on tracepoint in test_pages_isolated()
7711 */
7712 if (outer_start + (1UL << order) <= start)
7713 outer_start = start;
7714 }
7715
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007716 /* Make sure the range is really isolated. */
Wen Congyangb023f462012-12-11 16:00:45 -08007717 if (test_pages_isolated(outer_start, end, false)) {
Jonathan Toppins75dddef2017-08-10 15:23:35 -07007718 pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
Michal Nazarewiczdae803e2014-11-13 15:19:27 -08007719 __func__, outer_start, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007720 ret = -EBUSY;
7721 goto done;
7722 }
7723
Marek Szyprowski49f223a2012-01-25 12:49:24 +01007724 /* Grab isolated pages from freelists. */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007725 outer_end = isolate_freepages_range(&cc, outer_start, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007726 if (!outer_end) {
7727 ret = -EBUSY;
7728 goto done;
7729 }
7730
7731 /* Free head and tail (if any) */
7732 if (start != outer_start)
7733 free_contig_range(outer_start, start - outer_start);
7734 if (end != outer_end)
7735 free_contig_range(end, outer_end - end);
7736
7737done:
7738 undo_isolate_page_range(pfn_max_align_down(start),
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02007739 pfn_max_align_up(end), migratetype);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007740 return ret;
7741}
7742
7743void free_contig_range(unsigned long pfn, unsigned nr_pages)
7744{
Marek Szyprowskibcc2b022012-12-20 15:05:18 -08007745 unsigned int count = 0;
7746
7747 for (; nr_pages--; pfn++) {
7748 struct page *page = pfn_to_page(pfn);
7749
7750 count += page_count(page) != 1;
7751 __free_page(page);
7752 }
7753 WARN(count != 0, "%d pages are still in use!\n", count);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007754}
7755#endif
7756
Jiang Liu4ed7e022012-07-31 16:43:35 -07007757#ifdef CONFIG_MEMORY_HOTPLUG
Cody P Schafer0a647f32013-07-03 15:01:33 -07007758/*
7759 * The zone indicated has a new number of managed_pages; batch sizes and percpu
7760 * page high values need to be recalculated.
7761 */
Jiang Liu4ed7e022012-07-31 16:43:35 -07007762void __meminit zone_pcp_update(struct zone *zone)
7763{
Cody P Schafer0a647f32013-07-03 15:01:33 -07007764 unsigned cpu;
Cody P Schaferc8e251f2013-07-03 15:01:29 -07007765 mutex_lock(&pcp_batch_high_lock);
Cody P Schafer0a647f32013-07-03 15:01:33 -07007766 for_each_possible_cpu(cpu)
Cody P Schafer169f6c12013-07-03 15:01:41 -07007767 pageset_set_high_and_batch(zone,
7768 per_cpu_ptr(zone->pageset, cpu));
Cody P Schaferc8e251f2013-07-03 15:01:29 -07007769 mutex_unlock(&pcp_batch_high_lock);
Jiang Liu4ed7e022012-07-31 16:43:35 -07007770}
7771#endif
7772
Jiang Liu340175b2012-07-31 16:43:32 -07007773void zone_pcp_reset(struct zone *zone)
7774{
7775 unsigned long flags;
Minchan Kim5a883812012-10-08 16:33:39 -07007776 int cpu;
7777 struct per_cpu_pageset *pset;
Jiang Liu340175b2012-07-31 16:43:32 -07007778
7779 /* avoid races with drain_pages() */
7780 local_irq_save(flags);
7781 if (zone->pageset != &boot_pageset) {
Minchan Kim5a883812012-10-08 16:33:39 -07007782 for_each_online_cpu(cpu) {
7783 pset = per_cpu_ptr(zone->pageset, cpu);
7784 drain_zonestat(zone, pset);
7785 }
Jiang Liu340175b2012-07-31 16:43:32 -07007786 free_percpu(zone->pageset);
7787 zone->pageset = &boot_pageset;
7788 }
7789 local_irq_restore(flags);
7790}
7791
Wen Congyang6dcd73d2012-12-11 16:01:01 -08007792#ifdef CONFIG_MEMORY_HOTREMOVE
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007793/*
Joonsoo Kimb9eb6312016-05-19 17:12:06 -07007794 * All pages in the range must be in a single zone and isolated
7795 * before calling this.
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007796 */
7797void
7798__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
7799{
7800 struct page *page;
7801 struct zone *zone;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07007802 unsigned int order, i;
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007803 unsigned long pfn;
7804 unsigned long flags;
7805 /* find the first valid pfn */
7806 for (pfn = start_pfn; pfn < end_pfn; pfn++)
7807 if (pfn_valid(pfn))
7808 break;
7809 if (pfn == end_pfn)
7810 return;
Michal Hocko2d070ea2017-07-06 15:37:56 -07007811 offline_mem_sections(pfn, end_pfn);
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007812 zone = page_zone(pfn_to_page(pfn));
7813 spin_lock_irqsave(&zone->lock, flags);
7814 pfn = start_pfn;
7815 while (pfn < end_pfn) {
7816 if (!pfn_valid(pfn)) {
7817 pfn++;
7818 continue;
7819 }
7820 page = pfn_to_page(pfn);
Wen Congyangb023f462012-12-11 16:00:45 -08007821 /*
7822		 * The HWPoisoned page may not be in the buddy system, and
7823 * page_count() is not 0.
7824 */
7825 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7826 pfn++;
7827 SetPageReserved(page);
7828 continue;
7829 }
7830
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007831 BUG_ON(page_count(page));
7832 BUG_ON(!PageBuddy(page));
7833 order = page_order(page);
7834#ifdef CONFIG_DEBUG_VM
Joe Perches11705322016-03-17 14:19:50 -07007835 pr_info("remove from free list %lx %d %lx\n",
7836 pfn, 1 << order, end_pfn);
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007837#endif
7838 list_del(&page->lru);
7839 rmv_page_order(page);
7840 zone->free_area[order].nr_free--;
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007841 for (i = 0; i < (1 << order); i++)
7842 SetPageReserved((page+i));
7843 pfn += (1 << order);
7844 }
7845 spin_unlock_irqrestore(&zone->lock, flags);
7846}
7847#endif
Wu Fengguang8d22ba12009-12-16 12:19:58 +01007848
Wu Fengguang8d22ba12009-12-16 12:19:58 +01007849bool is_free_buddy_page(struct page *page)
7850{
7851 struct zone *zone = page_zone(page);
7852 unsigned long pfn = page_to_pfn(page);
7853 unsigned long flags;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07007854 unsigned int order;
Wu Fengguang8d22ba12009-12-16 12:19:58 +01007855
7856 spin_lock_irqsave(&zone->lock, flags);
7857 for (order = 0; order < MAX_ORDER; order++) {
7858 struct page *page_head = page - (pfn & ((1 << order) - 1));
7859
7860 if (PageBuddy(page_head) && page_order(page_head) >= order)
7861 break;
7862 }
7863 spin_unlock_irqrestore(&zone->lock, flags);
7864
7865 return order < MAX_ORDER;
7866}