// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 *  Copyright (C) 2008-2014 Christoph Lameter
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/page_ext.h>
#include <linux/page_owner.h>

#include "internal.h"

#ifdef CONFIG_NUMA
int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;

/* zero numa counters within a zone */
static void zero_zone_numa_counters(struct zone *zone)
{
	int item, cpu;

	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) {
		atomic_long_set(&zone->vm_numa_event[item], 0);
		for_each_online_cpu(cpu) {
			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
						= 0;
		}
	}
}

/* zero numa counters of all the populated zones */
static void zero_zones_numa_counters(void)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		zero_zone_numa_counters(zone);
}

/* zero global numa counters */
static void zero_global_numa_counters(void)
{
	int item;

	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
		atomic_long_set(&vm_numa_event[item], 0);
}

static void invalid_numa_statistics(void)
{
	zero_zones_numa_counters();
	zero_global_numa_counters();
}

static DEFINE_MUTEX(vm_numa_stat_lock);

int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int ret, oldval;

	mutex_lock(&vm_numa_stat_lock);
	if (write)
		oldval = sysctl_vm_numa_stat;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (ret || !write)
		goto out;

	if (oldval == sysctl_vm_numa_stat)
		goto out;
	else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
		static_branch_enable(&vm_numa_stat_key);
		pr_info("enable numa statistics\n");
	} else {
		static_branch_disable(&vm_numa_stat_key);
		invalid_numa_statistics();
		pr_info("disable numa statistics, and clear numa counters\n");
	}

out:
	mutex_unlock(&vm_numa_stat_lock);
	return ret;
}
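
/*
 * Illustrative note (not extra kernel logic): the handler above is what runs
 * when userspace toggles the NUMA statistics sysctl, e.g. (assuming the usual
 * procfs layout) "echo 0 > /proc/sys/vm/numa_stat" to disable collection and
 * clear the counters, or "echo 1" to re-enable it.
 */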
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	cpus_read_lock();
	sum_vm_events(ret);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(all_vm_events);

/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_zone_stat);
EXPORT_SYMBOL(vm_node_stat);

#ifdef CONFIG_NUMA
static void fold_vm_zone_numa_events(struct zone *zone)
{
	unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
	int cpu;
	enum numa_stat_item item;

	for_each_online_cpu(cpu) {
		struct per_cpu_zonestat *pzstats;

		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
		for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
			zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
	}

	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
		zone_numa_event_add(zone_numa_events[item], zone, item);
}

void fold_vm_numa_events(void)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		fold_vm_zone_numa_events(zone);
}
#endif

#ifdef CONFIG_SMP

int calculate_pressure_threshold(struct zone *zone)
{
	int threshold;
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
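
/*
 * Illustrative example (invented numbers, not taken from a real machine):
 * with low_wmark_pages() - min_wmark_pages() == 256 pages and 8 online CPUs,
 * the pressure threshold above becomes max(1, 256 / 8) = 32, and is later
 * clamped to at most 125.
 */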

int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem)+1
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
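
/*
 * Worked example (hypothetical configuration, for illustration only):
 * a zone with 1 GiB of managed memory gives mem = 8 (units of 128 MB),
 * so fls(mem) = 4.  With 4 online CPUs, fls(4) = 3, and the threshold is
 * 2 * 3 * (1 + 4) = 30, well under the 125 cap.
 */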

/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int cpu;
	int threshold;

	/* Zero current pgdat thresholds */
	for_each_online_pgdat(pgdat) {
		for_each_online_cpu(cpu) {
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
		}
	}

	for_each_populated_zone(zone) {
		struct pglist_data *pgdat = zone->zone_pgdat;
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu) {
			int pgdat_threshold;

			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
							= threshold;

			/* Base nodestat threshold on the largest populated zone. */
			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
				= max(threshold, pgdat_threshold);
		}

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}

void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
							= threshold;
	}
}

/*
 * For use when we know that interrupts are disabled,
 * or when we know that preemption is disabled and that
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			   long delta)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	/*
	 * Accurate vmstat updates require a RMW. On !PREEMPT_RT kernels,
	 * atomicity is provided by IRQs being disabled -- either explicitly
	 * or via local_lock_irq. On PREEMPT_RT, local_lock_irq only disables
	 * CPU migrations and preemption potentially corrupts a counter so
	 * disable preemption.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(abs(x) > t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);

	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
}
EXPORT_SYMBOL(__mod_zone_page_state);
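
/*
 * Usage sketch (illustrative; the real callers live elsewhere in mm/): a
 * caller that already has interrupts off, such as the page allocator
 * adjusting free-page accounting, can do
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 *
 * and the delta only reaches the global vm_zone_stat array once the
 * per-cpu differential exceeds stat_threshold.
 */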

void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
				long delta)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long x;
	long t;

	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	/* See __mod_zone_page_state */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(abs(x) > t)) {
		node_page_state_add(x, pgdat, item);
		x = 0;
	}
	__this_cpu_write(*p, x);

	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
}
EXPORT_SYMBOL(__mod_node_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	/* See __mod_zone_page_state */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}

	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
}

void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	/* See __mod_zone_page_state */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v + overstep, pgdat, item);
		__this_cpu_write(*p, -overstep);
	}

	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __inc_node_page_state(struct page *page, enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__inc_node_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	/* See __mod_zone_page_state */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < - t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}

	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
}

void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	/* See __mod_zone_page_state */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < - t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v - overstep, pgdat, item);
		__this_cpu_write(*p, overstep);
	}

	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void __dec_node_page_state(struct page *page, enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__dec_node_page_state);

#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *	0	No overstepping
 *	1	Overstepping half of threshold
 *	-1	Overstepping minus half of threshold
 */
static inline void mod_zone_state(struct zone *zone,
	enum zone_stat_item item, long delta, int overstep_mode)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong CPU if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (abs(n) > t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		zone_page_state_add(z, zone, item);
}
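
/*
 * Numerical illustration of the overstep handling above (values are made
 * up): with stat_threshold t = 32, a per-cpu diff of 30 and delta = +5 in
 * overstep mode 1, n = 35 exceeds t, so os = 16, z = 51 pages are folded
 * into the global counter and the per-cpu diff restarts at -16, halving
 * the chance that the very next increment overflows again.
 */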

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	mod_zone_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);

static inline void mod_node_state(struct pglist_data *pgdat,
	enum node_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long o, n, t, z;

	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	do {
		z = 0;  /* overflow to node counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong CPU if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a node.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (abs(n) > t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to node counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		node_page_state_add(z, pgdat, item);
}

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
					long delta)
{
	mod_node_state(pgdat, item, delta, 0);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	mod_node_state(pgdat, item, 1, 1);
}

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_node_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_state);

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
					long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_node_page_state(pgdat, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;
	struct pglist_data *pgdat;

	pgdat = page_pgdat(page);
	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_node_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_node_page_state);
#endif

/*
 * Fold a differential into the global counters.
 * Returns the number of counters updated.
 */
static int fold_diff(int *zone_diff, int *node_diff)
{
	int i;
	int changes = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (zone_diff[i]) {
			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
			changes++;
		}

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		if (node_diff[i]) {
			atomic_long_add(node_diff[i], &vm_node_stat[i]);
			changes++;
		}
	return changes;
}

/*
 * Update the zone and node counters for the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 *
 * The function returns the number of global counters updated.
 */
static int refresh_cpu_vm_stats(bool do_pagesets)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
	int changes = 0;

	for_each_populated_zone(zone) {
		struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
#ifdef CONFIG_NUMA
		struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset;
#endif

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0);
			if (v) {

				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				__this_cpu_write(pcp->expire, 3);
#endif
			}
		}
#ifdef CONFIG_NUMA

		if (do_pagesets) {
			cond_resched();
			/*
			 * Deal with draining the remote pageset of this
			 * processor
			 *
			 * Check if there are pages remaining in this pageset
			 * if not then there is nothing to expire.
			 */
			if (!__this_cpu_read(pcp->expire) ||
			       !__this_cpu_read(pcp->count))
				continue;

			/*
			 * We never drain zones local to this processor.
			 */
			if (zone_to_nid(zone) == numa_node_id()) {
				__this_cpu_write(pcp->expire, 0);
				continue;
			}

			if (__this_cpu_dec_return(pcp->expire))
				continue;

			if (__this_cpu_read(pcp->count)) {
				drain_zone_pages(zone, this_cpu_ptr(pcp));
				changes++;
			}
		}
#endif
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
		}
	}

	changes += fold_diff(global_zone_diff, global_node_diff);
	return changes;
}
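
/*
 * Context note (summary, not extra kernel logic): the deferred vmstat
 * machinery later in this file calls refresh_cpu_vm_stats() periodically;
 * a zero return is what lets an idle CPU stop re-arming its vmstat work,
 * assuming nothing else dirties the per-cpu differentials in the meantime.
 */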

/*
 * Fold the data for an offline cpu into the global array.
 * There cannot be any access by the offline cpu and therefore
 * synchronization is simplified.
 */
void cpu_vm_stats_fold(int cpu)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_zonestat *pzstats;

		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			if (pzstats->vm_stat_diff[i]) {
				int v;

				v = pzstats->vm_stat_diff[i];
				pzstats->vm_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
			}
		}
#ifdef CONFIG_NUMA
		for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
			if (pzstats->vm_numa_event[i]) {
				unsigned long v;

				v = pzstats->vm_numa_event[i];
				pzstats->vm_numa_event[i] = 0;
				zone_numa_event_add(v, zone, i);
			}
		}
#endif
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat *p;

		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
			if (p->vm_node_stat_diff[i]) {
				int v;

				v = p->vm_node_stat_diff[i];
				p->vm_node_stat_diff[i] = 0;
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
	}

	fold_diff(global_zone_diff, global_node_diff);
}

/*
 * this is only called if !populated_zone(zone), which implies no other users of
 * pzstats->vm_stat_diff[] exist.
 */
void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats)
{
	unsigned long v;
	int i;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		if (pzstats->vm_stat_diff[i]) {
			v = pzstats->vm_stat_diff[i];
			pzstats->vm_stat_diff[i] = 0;
			zone_page_state_add(v, zone, i);
		}
	}

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
		if (pzstats->vm_numa_event[i]) {
			v = pzstats->vm_numa_event[i];
			pzstats->vm_numa_event[i] = 0;
			zone_numa_event_add(v, zone, i);
		}
	}
#endif
}
#endif

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
unsigned long sum_zone_node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	int i;
	unsigned long count = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_page_state(zones + i, item);

	return count;
}

/* Determine the per node value of a numa stat item. */
unsigned long sum_zone_numa_event_state(int node,
				 enum numa_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	unsigned long count = 0;
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_numa_event_state(zones + i, item);

	return count;
}

/*
 * Determine the per node value of a stat item.
 */
unsigned long node_page_state_pages(struct pglist_data *pgdat,
				    enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

unsigned long node_page_state(struct pglist_data *pgdat,
			      enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return node_page_state_pages(pgdat, item);
}
#endif

#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long blocks;

		/*
		 * Count number of free blocks.
		 *
		 * Access to nr_free is lockless as nr_free is used only for
		 * diagnostic purposes. Use data_race to avoid KCSAN warning.
		 */
		blocks = data_race(zone->free_area[order].nr_free);
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}

/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (WARN_ON_ONCE(order >= MAX_ORDER))
		return 0;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64((1000 + (div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
}
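
/*
 * Worked example with invented numbers: for an order-2 request (4 pages),
 * suppose a zone has free_pages = 1000 spread over free_blocks_total = 500
 * blocks and free_blocks_suitable = 0.  The expression above yields
 * 1000 - (1000 + 1000 * 1000 / 4) / 500 = 498, i.e. an index of 0.498,
 * roughly midway between "out of memory" (0) and "badly fragmented" (1).
 */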

/*
 * Calculates external fragmentation within a zone wrt the given order.
 * It is defined as the percentage of pages found in blocks of size
 * less than 1 << order. It returns values in range [0, 100].
 */
unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	if (info.free_pages == 0)
		return 0;

	return div_u64((info.free_pages -
			(info.free_blocks_suitable << order)) * 100,
			info.free_pages);
}
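
/*
 * Illustration (hypothetical numbers): with 1000 free pages of which
 * 100 order-2 blocks (400 pages) are suitable, extfrag_for_order() for
 * order 2 reports (1000 - 400) * 100 / 1000 = 60, i.e. 60% of free memory
 * sits in blocks too small for the request.
 */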

/* Same as __fragmentation_index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	return __fragmentation_index(order, &info);
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
    defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

const char * const vmstat_text[] = {
	/* enum zone_stat_item counters */
	"nr_free_pages",
	"nr_zone_inactive_anon",
	"nr_zone_active_anon",
	"nr_zone_inactive_file",
	"nr_zone_active_file",
	"nr_zone_unevictable",
	"nr_zone_write_pending",
	"nr_mlock",
	"nr_bounce",
#if IS_ENABLED(CONFIG_ZSMALLOC)
	"nr_zspages",
#endif
	"nr_free_cma",

	/* enum numa_stat_item counters */
#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

	/* enum node_stat_item counters */
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_isolated_anon",
	"nr_isolated_file",
	"workingset_nodes",
	"workingset_refault_anon",
	"workingset_refault_file",
	"workingset_activate_anon",
	"workingset_activate_file",
	"workingset_restore_anon",
	"workingset_restore_file",
	"workingset_nodereclaim",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_writeback_temp",
	"nr_shmem",
	"nr_shmem_hugepages",
	"nr_shmem_pmdmapped",
	"nr_file_hugepages",
	"nr_file_pmdmapped",
	"nr_anon_transparent_hugepages",
	"nr_vmscan_write",
	"nr_vmscan_immediate_reclaim",
	"nr_dirtied",
	"nr_written",
	"nr_throttled_written",
	"nr_kernel_misc_reclaimable",
	"nr_foll_pin_acquired",
	"nr_foll_pin_released",
	"nr_kernel_stack",
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
	"nr_shadow_call_stack",
#endif
	"nr_page_table_pages",
#ifdef CONFIG_SWAP
	"nr_swapcached",
#endif

	/* enum writeback_stat_item counters */
	"nr_dirty_threshold",
	"nr_dirty_background_threshold",

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
	/* enum vm_event_item counters */
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")
	TEXTS_FOR_ZONES("allocstall")
	TEXTS_FOR_ZONES("pgskip")

	"pgfree",
	"pgactivate",
	"pgdeactivate",
	"pglazyfree",

	"pgfault",
	"pgmajfault",
	"pglazyfreed",

	"pgrefill",
	"pgreuse",
	"pgsteal_kswapd",
	"pgsteal_direct",
	"pgdemote_kswapd",
	"pgdemote_direct",
	"pgscan_kswapd",
	"pgscan_direct",
	"pgscan_direct_throttle",
	"pgscan_anon",
	"pgscan_file",
	"pgsteal_anon",
	"pgsteal_file",

#ifdef CONFIG_NUMA
	"zone_reclaim_failed",
#endif
	"pginodesteal",
	"slabs_scanned",
	"kswapd_inodesteal",
	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
	"pageoutrun",

	"pgrotated",

	"drop_pagecache",
	"drop_slab",
	"oom_kill",

#ifdef CONFIG_NUMA_BALANCING
	"numa_pte_updates",
	"numa_huge_pte_updates",
	"numa_hint_faults",
	"numa_hint_faults_local",
	"numa_pages_migrated",
#endif
#ifdef CONFIG_MIGRATION
	"pgmigrate_success",
	"pgmigrate_fail",
	"thp_migration_success",
	"thp_migration_fail",
	"thp_migration_split",
#endif
#ifdef CONFIG_COMPACTION
	"compact_migrate_scanned",
	"compact_free_scanned",
	"compact_isolated",
	"compact_stall",
	"compact_fail",
	"compact_success",
	"compact_daemon_wake",
	"compact_daemon_migrate_scanned",
	"compact_daemon_free_scanned",
#endif

#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
#ifdef CONFIG_CMA
	"cma_alloc_success",
	"cma_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"thp_fault_alloc",
	"thp_fault_fallback",
	"thp_fault_fallback_charge",
	"thp_collapse_alloc",
	"thp_collapse_alloc_failed",
	"thp_file_alloc",
	"thp_file_fallback",
	"thp_file_fallback_charge",
	"thp_file_mapped",
	"thp_split_page",
	"thp_split_page_failed",
	"thp_deferred_split_page",
	"thp_split_pmd",
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	"thp_split_pud",
#endif
	"thp_zero_page_alloc",
	"thp_zero_page_alloc_failed",
	"thp_swpout",
	"thp_swpout_fallback",
#endif
#ifdef CONFIG_MEMORY_BALLOON
	"balloon_inflate",
	"balloon_deflate",
#ifdef CONFIG_BALLOON_COMPACTION
	"balloon_migrate",
#endif
#endif /* CONFIG_MEMORY_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
	"nr_tlb_remote_flush",
	"nr_tlb_remote_flush_received",
	"nr_tlb_local_flush_all",
	"nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */

#ifdef CONFIG_DEBUG_VM_VMACACHE
	"vmacache_find_calls",
	"vmacache_find_hits",
#endif
#ifdef CONFIG_SWAP
	"swap_ra",
	"swap_ra_hit",
#endif
#ifdef CONFIG_X86
	"direct_map_level2_splits",
	"direct_map_level3_splits",
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */

#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
     defined(CONFIG_PROC_FS)
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * Walk zones in a node and print using a callback.
 * If @assert_populated is true, only use callback for zones that are populated.
 */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		bool assert_populated, bool nolock,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (assert_populated && !populated_zone(zone))
			continue;

		if (!nolock)
			spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		if (!nolock)
1441 spin_unlock_irqrestore(&zone->lock, flags);
Andrew Morton3c486872015-02-10 14:09:43 -08001442 }
1443}
1444#endif
1445
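/*
 * Hedged illustration (not part of the tree, names are made up): a new
 * walker only needs a print callback with this signature;
 * walk_zones_in_node() does the zone iteration, the populated-zone
 * filtering and the zone->lock handling.
 */
static void example_show_print(struct seq_file *m, pg_data_t *pgdat,
				struct zone *zone)
{
	seq_printf(m, "Node %d, zone %8s: managed %lu pages\n",
		   pgdat->node_id, zone->name, zone_managed_pages(zone));
}

static int example_show(struct seq_file *m, void *arg)
{
	walk_zones_in_node(m, (pg_data_t *)arg, true, false,
			   example_show_print);
	return 0;
}
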
Mel Gormand7a57522010-05-24 14:32:25 -07001446#ifdef CONFIG_PROC_FS
Mel Gorman467c9962007-10-16 01:26:02 -07001447static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1448 struct zone *zone)
1449{
1450 int order;
1451
1452 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1453 for (order = 0; order < MAX_ORDER; ++order)
Liu Shixinaf1c31a2021-11-05 13:43:59 -07001454 /*
1455 * Access to nr_free is lockless as nr_free is used only for
1456 * printing purposes. Use data_race to avoid KCSAN warning.
1457 */
1458 seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free));
Mel Gorman467c9962007-10-16 01:26:02 -07001459 seq_putc(m, '\n');
1460}
1461
1462/*
1463 * This walks the free areas for each zone.
1464 */
1465static int frag_show(struct seq_file *m, void *arg)
1466{
1467 pg_data_t *pgdat = (pg_data_t *)arg;
Vinayak Menon727c0802017-07-10 15:49:17 -07001468 walk_zones_in_node(m, pgdat, true, false, frag_show_print);
Mel Gorman467c9962007-10-16 01:26:02 -07001469 return 0;
1470}
1471
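/*
 * frag_show_print() above emits one line per zone in the form
 * "Node <id>, zone <name>  <free count per order ...>", which userspace
 * reads as /proc/buddyinfo (registered in init_mm_internals() below).
 * A minimal, hedged sketch of a standalone userspace reader - not kernel
 * code, build it separately:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char zone[16];
 *		int node;
 *		unsigned long nr;
 *		FILE *f = fopen("/proc/buddyinfo", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fscanf(f, "Node %d, zone %15s", &node, zone) == 2) {
 *			printf("node %d zone %s:", node, zone);
 *			while (fscanf(f, "%lu", &nr) == 1)
 *				printf(" %lu", nr);	// one count per order
 *			printf("\n");
 *		}
 *		fclose(f);
 *		return 0;
 *	}
 */
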
1472static void pagetypeinfo_showfree_print(struct seq_file *m,
1473 pg_data_t *pgdat, struct zone *zone)
1474{
1475 int order, mtype;
1476
1477 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1478 seq_printf(m, "Node %4d, zone %8s, type %12s ",
1479 pgdat->node_id,
1480 zone->name,
1481 migratetype_names[mtype]);
1482 for (order = 0; order < MAX_ORDER; ++order) {
1483 unsigned long freecount = 0;
1484 struct free_area *area;
1485 struct list_head *curr;
Michal Hocko93b3a672019-11-05 21:16:44 -08001486 bool overflow = false;
Mel Gorman467c9962007-10-16 01:26:02 -07001487
1488 area = &(zone->free_area[order]);
1489
Michal Hocko93b3a672019-11-05 21:16:44 -08001490 list_for_each(curr, &area->free_list[mtype]) {
1491 /*
1492 * Cap the free_list iteration because it might
1493 * be really large and we are under a spinlock
1494 * so a long time spent here could trigger a
1495 * hard lockup detector. Anyway this is a
1496 * debugging tool so knowing there is a handful
1497 * of pages of this order should be more than
1498 * sufficient.
1499 */
1500 if (++freecount >= 100000) {
1501 overflow = true;
1502 break;
1503 }
1504 }
1505 seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1506 spin_unlock_irq(&zone->lock);
1507 cond_resched();
1508 spin_lock_irq(&zone->lock);
Mel Gorman467c9962007-10-16 01:26:02 -07001509 }
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001510 seq_putc(m, '\n');
1511 }
Mel Gorman467c9962007-10-16 01:26:02 -07001512}
1513
1514/* Print out the free pages at each order for each migratetype */
Miaohe Lin33090af2021-09-02 15:01:08 -07001515static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
Mel Gorman467c9962007-10-16 01:26:02 -07001516{
1517 int order;
1518 pg_data_t *pgdat = (pg_data_t *)arg;
1519
1520 /* Print header */
1521 seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1522 for (order = 0; order < MAX_ORDER; ++order)
1523 seq_printf(m, "%6d ", order);
1524 seq_putc(m, '\n');
1525
Vinayak Menon727c0802017-07-10 15:49:17 -07001526 walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
Mel Gorman467c9962007-10-16 01:26:02 -07001527}
1528
1529static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1530 pg_data_t *pgdat, struct zone *zone)
1531{
1532 int mtype;
1533 unsigned long pfn;
1534 unsigned long start_pfn = zone->zone_start_pfn;
Cody P Schafer108bcc92013-02-22 16:35:23 -08001535 unsigned long end_pfn = zone_end_pfn(zone);
Mel Gorman467c9962007-10-16 01:26:02 -07001536 unsigned long count[MIGRATE_TYPES] = { 0, };
1537
1538 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1539 struct page *page;
1540
Michal Hockod336e942017-07-06 15:38:07 -07001541 page = pfn_to_online_page(pfn);
1542 if (!page)
Mel Gorman467c9962007-10-16 01:26:02 -07001543 continue;
1544
Joonsoo Kima91c43c2016-05-19 17:12:10 -07001545 if (page_zone(page) != zone)
1546 continue;
1547
Mel Gorman467c9962007-10-16 01:26:02 -07001548 mtype = get_pageblock_migratetype(page);
1549
Mel Gormane80d6a22008-08-14 11:10:14 +01001550 if (mtype < MIGRATE_TYPES)
1551 count[mtype]++;
Mel Gorman467c9962007-10-16 01:26:02 -07001552 }
1553
1554 /* Print counts */
1555 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1556 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1557 seq_printf(m, "%12lu ", count[mtype]);
1558 seq_putc(m, '\n');
1559}
1560
SeongJae Parkf113e642017-09-06 16:24:23 -07001561/* Print out the number of pageblocks for each migratetype */
Miaohe Lin33090af2021-09-02 15:01:08 -07001562static void pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
Mel Gorman467c9962007-10-16 01:26:02 -07001563{
1564 int mtype;
1565 pg_data_t *pgdat = (pg_data_t *)arg;
1566
1567 seq_printf(m, "\n%-23s", "Number of blocks type ");
1568 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1569 seq_printf(m, "%12s ", migratetype_names[mtype]);
1570 seq_putc(m, '\n');
Vinayak Menon727c0802017-07-10 15:49:17 -07001571 walk_zones_in_node(m, pgdat, true, false,
1572 pagetypeinfo_showblockcount_print);
Mel Gorman467c9962007-10-16 01:26:02 -07001573}
1574
Joonsoo Kim48c96a32014-12-12 16:56:01 -08001575/*
1576 * Print out the number of pageblocks for each migratetype that contain pages
1577 * of other types. This gives an indication of how well fallbacks are being
1578 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1579 * to determine what is going on
1580 */
1581static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1582{
1583#ifdef CONFIG_PAGE_OWNER
1584 int mtype;
1585
Vlastimil Babka7dd80b82016-03-15 14:56:12 -07001586 if (!static_branch_unlikely(&page_owner_inited))
Joonsoo Kim48c96a32014-12-12 16:56:01 -08001587 return;
1588
1589 drain_all_pages(NULL);
1590
1591 seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1592 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1593 seq_printf(m, "%12s ", migratetype_names[mtype]);
1594 seq_putc(m, '\n');
1595
Vinayak Menon727c0802017-07-10 15:49:17 -07001596 walk_zones_in_node(m, pgdat, true, true,
1597 pagetypeinfo_showmixedcount_print);
Joonsoo Kim48c96a32014-12-12 16:56:01 -08001598#endif /* CONFIG_PAGE_OWNER */
1599}
1600
Mel Gorman467c9962007-10-16 01:26:02 -07001601/*
1602 * This prints out statistics in relation to grouping pages by mobility.
1603 * It is expensive to collect so do not constantly read the file.
1604 */
1605static int pagetypeinfo_show(struct seq_file *m, void *arg)
1606{
1607 pg_data_t *pgdat = (pg_data_t *)arg;
1608
KOSAKI Motohiro41b25a32008-04-30 00:52:13 -07001609 /* check memoryless node */
Lai Jiangshana47b53c2012-12-12 13:51:37 -08001610 if (!node_state(pgdat->node_id, N_MEMORY))
KOSAKI Motohiro41b25a32008-04-30 00:52:13 -07001611 return 0;
1612
Mel Gorman467c9962007-10-16 01:26:02 -07001613 seq_printf(m, "Page block order: %d\n", pageblock_order);
1614 seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages);
1615 seq_putc(m, '\n');
1616 pagetypeinfo_showfree(m, pgdat);
1617 pagetypeinfo_showblockcount(m, pgdat);
Joonsoo Kim48c96a32014-12-12 16:56:01 -08001618 pagetypeinfo_showmixedcount(m, pgdat);
Mel Gorman467c9962007-10-16 01:26:02 -07001619
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001620 return 0;
1621}
1622
Alexey Dobriyan8f32f7e2008-10-06 04:13:52 +04001623static const struct seq_operations fragmentation_op = {
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001624 .start = frag_start,
1625 .next = frag_next,
1626 .stop = frag_stop,
1627 .show = frag_show,
1628};
1629
Alexey Dobriyan74e2e8e2008-10-06 04:15:36 +04001630static const struct seq_operations pagetypeinfo_op = {
Mel Gorman467c9962007-10-16 01:26:02 -07001631 .start = frag_start,
1632 .next = frag_next,
1633 .stop = frag_stop,
1634 .show = pagetypeinfo_show,
1635};
1636
Mel Gormane2ecc8a2016-07-28 15:47:02 -07001637static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1638{
1639 int zid;
1640
1641 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1642 struct zone *compare = &pgdat->node_zones[zid];
1643
1644 if (populated_zone(compare))
1645 return zone == compare;
1646 }
1647
Mel Gormane2ecc8a2016-07-28 15:47:02 -07001648 return false;
1649}
1650
Mel Gorman467c9962007-10-16 01:26:02 -07001651static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1652 struct zone *zone)
1653{
1654 int i;
1655 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
Mel Gormane2ecc8a2016-07-28 15:47:02 -07001656 if (is_zone_first_populated(pgdat, zone)) {
1657 seq_printf(m, "\n per-node stats");
1658 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
Muchun Song69473e52021-02-24 12:03:23 -08001659 unsigned long pages = node_page_state_pages(pgdat, i);
1660
1661 if (vmstat_item_print_in_thp(i))
1662 pages /= HPAGE_PMD_NR;
Konstantin Khlebnikov9d7ea9a2019-12-04 16:49:50 -08001663 seq_printf(m, "\n %-12s %lu", node_stat_name(i),
Muchun Song69473e52021-02-24 12:03:23 -08001664 pages);
Mel Gormane2ecc8a2016-07-28 15:47:02 -07001665 }
1666 }
Mel Gorman467c9962007-10-16 01:26:02 -07001667 seq_printf(m,
1668 "\n pages free %lu"
Liangcai Fana6ea8b52021-11-05 13:40:37 -07001669 "\n boost %lu"
Mel Gorman467c9962007-10-16 01:26:02 -07001670 "\n min %lu"
1671 "\n low %lu"
1672 "\n high %lu"
Mel Gorman467c9962007-10-16 01:26:02 -07001673 "\n spanned %lu"
Jiang Liu9feedc92012-12-12 13:52:12 -08001674 "\n present %lu"
David Hildenbrand3c381db2021-02-25 17:16:40 -08001675 "\n managed %lu"
1676 "\n cma %lu",
Mel Gorman88f5acf2011-01-13 15:45:41 -08001677 zone_page_state(zone, NR_FREE_PAGES),
Liangcai Fana6ea8b52021-11-05 13:40:37 -07001678 zone->watermark_boost,
Mel Gorman41858962009-06-16 15:32:12 -07001679 min_wmark_pages(zone),
1680 low_wmark_pages(zone),
1681 high_wmark_pages(zone),
Mel Gorman467c9962007-10-16 01:26:02 -07001682 zone->spanned_pages,
Jiang Liu9feedc92012-12-12 13:52:12 -08001683 zone->present_pages,
David Hildenbrand3c381db2021-02-25 17:16:40 -08001684 zone_managed_pages(zone),
1685 zone_cma_pages(zone));
Mel Gorman467c9962007-10-16 01:26:02 -07001686
Mel Gorman467c9962007-10-16 01:26:02 -07001687 seq_printf(m,
Mel Gorman3484b2d2014-08-06 16:07:14 -07001688 "\n protection: (%ld",
Mel Gorman467c9962007-10-16 01:26:02 -07001689 zone->lowmem_reserve[0]);
1690 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
Mel Gorman3484b2d2014-08-06 16:07:14 -07001691 seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
David Rientjes7dfb8bf2017-05-03 14:53:02 -07001692 seq_putc(m, ')');
1693
Baoquan Hea8a4b7a2020-08-14 17:30:07 -07001694 /* If unpopulated, no other information is useful */
1695 if (!populated_zone(zone)) {
1696 seq_putc(m, '\n');
1697 return;
1698 }
1699
David Rientjes7dfb8bf2017-05-03 14:53:02 -07001700 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
Konstantin Khlebnikov9d7ea9a2019-12-04 16:49:50 -08001701 seq_printf(m, "\n %-12s %lu", zone_stat_name(i),
1702 zone_page_state(zone, i));
David Rientjes7dfb8bf2017-05-03 14:53:02 -07001703
Kemi Wang3a321d22017-09-08 16:12:48 -07001704#ifdef CONFIG_NUMA
Mel Gormanf19298b2021-06-28 19:41:44 -07001705 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
Konstantin Khlebnikov9d7ea9a2019-12-04 16:49:50 -08001706 seq_printf(m, "\n %-12s %lu", numa_stat_name(i),
Mel Gormanf19298b2021-06-28 19:41:44 -07001707 zone_numa_event_state(zone, i));
Kemi Wang3a321d22017-09-08 16:12:48 -07001708#endif
1709
David Rientjes7dfb8bf2017-05-03 14:53:02 -07001710 seq_printf(m, "\n pagesets");
Mel Gorman467c9962007-10-16 01:26:02 -07001711 for_each_online_cpu(i) {
Mel Gorman28f836b2021-06-28 19:41:38 -07001712 struct per_cpu_pages *pcp;
1713 struct per_cpu_zonestat __maybe_unused *pzstats;
Mel Gorman467c9962007-10-16 01:26:02 -07001714
Mel Gorman28f836b2021-06-28 19:41:38 -07001715 pcp = per_cpu_ptr(zone->per_cpu_pageset, i);
Christoph Lameter3dfa5722008-02-04 22:29:19 -08001716 seq_printf(m,
1717 "\n cpu: %i"
1718 "\n count: %i"
1719 "\n high: %i"
1720 "\n batch: %i",
1721 i,
Mel Gorman28f836b2021-06-28 19:41:38 -07001722 pcp->count,
1723 pcp->high,
1724 pcp->batch);
Mel Gorman467c9962007-10-16 01:26:02 -07001725#ifdef CONFIG_SMP
Mel Gorman28f836b2021-06-28 19:41:38 -07001726 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i);
Mel Gorman467c9962007-10-16 01:26:02 -07001727 seq_printf(m, "\n vm stats threshold: %d",
Mel Gorman28f836b2021-06-28 19:41:38 -07001728 pzstats->stat_threshold);
Mel Gorman467c9962007-10-16 01:26:02 -07001729#endif
1730 }
1731 seq_printf(m,
Mel Gorman599d0c92016-07-28 15:45:31 -07001732 "\n node_unreclaimable: %u"
Andrey Ryabinin3a50d142017-11-15 17:34:15 -08001733 "\n start_pfn: %lu",
Johannes Weinerc73322d2017-05-03 14:51:51 -07001734 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
Andrey Ryabinin3a50d142017-11-15 17:34:15 -08001735 zone->zone_start_pfn);
Mel Gorman467c9962007-10-16 01:26:02 -07001736 seq_putc(m, '\n');
1737}
1738
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001739/*
David Rientjesb2bd8592017-05-03 14:52:59 -07001740 * Output information about zones in @pgdat. All zones are printed regardless
1741 * of whether they are populated or not: lowmem_reserve_ratio operates on the
1742 * set of all zones and userspace would not be aware of such zones if they are
1743 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001744 */
1745static int zoneinfo_show(struct seq_file *m, void *arg)
1746{
Mel Gorman467c9962007-10-16 01:26:02 -07001747 pg_data_t *pgdat = (pg_data_t *)arg;
Vinayak Menon727c0802017-07-10 15:49:17 -07001748 walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001749 return 0;
1750}
1751
Alexey Dobriyan5c9fe622008-10-06 04:19:42 +04001752static const struct seq_operations zoneinfo_op = {
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001753 .start = frag_start, /* iterate over all zones. The same as in
1754 * fragmentation. */
1755 .next = frag_next,
1756 .stop = frag_stop,
1757 .show = zoneinfo_show,
1758};
1759
Konstantin Khlebnikov9d7ea9a2019-12-04 16:49:50 -08001760#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
Mel Gormanf19298b2021-06-28 19:41:44 -07001761 NR_VM_NUMA_EVENT_ITEMS + \
Konstantin Khlebnikov9d7ea9a2019-12-04 16:49:50 -08001762 NR_VM_NODE_STAT_ITEMS + \
1763 NR_VM_WRITEBACK_STAT_ITEMS + \
1764 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
1765 NR_VM_EVENT_ITEMS : 0))
Michael Rubin79da8262010-10-26 14:21:36 -07001766
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001767static void *vmstat_start(struct seq_file *m, loff_t *pos)
1768{
Christoph Lameter2244b952006-06-30 01:55:33 -07001769 unsigned long *v;
Konstantin Khlebnikov9d7ea9a2019-12-04 16:49:50 -08001770 int i;
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001771
Konstantin Khlebnikov9d7ea9a2019-12-04 16:49:50 -08001772 if (*pos >= NR_VMSTAT_ITEMS)
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001773 return NULL;
1774
Konstantin Khlebnikov9d7ea9a2019-12-04 16:49:50 -08001775 BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
Mel Gormanf19298b2021-06-28 19:41:44 -07001776 fold_vm_numa_events();
Konstantin Khlebnikov9d7ea9a2019-12-04 16:49:50 -08001777 v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
Christoph Lameter2244b952006-06-30 01:55:33 -07001778 m->private = v;
1779 if (!v)
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001780 return ERR_PTR(-ENOMEM);
Christoph Lameter2244b952006-06-30 01:55:33 -07001781 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
Michal Hockoc41f0122017-09-06 16:23:36 -07001782 v[i] = global_zone_page_state(i);
Michael Rubin79da8262010-10-26 14:21:36 -07001783 v += NR_VM_ZONE_STAT_ITEMS;
1784
Kemi Wang3a321d22017-09-08 16:12:48 -07001785#ifdef CONFIG_NUMA
Mel Gormanf19298b2021-06-28 19:41:44 -07001786 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
1787 v[i] = global_numa_event_state(i);
1788 v += NR_VM_NUMA_EVENT_ITEMS;
Kemi Wang3a321d22017-09-08 16:12:48 -07001789#endif
1790
Muchun Song69473e52021-02-24 12:03:23 -08001791 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
Roman Gushchinea426c22020-08-06 23:20:35 -07001792 v[i] = global_node_page_state_pages(i);
Muchun Song69473e52021-02-24 12:03:23 -08001793 if (vmstat_item_print_in_thp(i))
1794 v[i] /= HPAGE_PMD_NR;
1795 }
Mel Gorman75ef7182016-07-28 15:45:24 -07001796 v += NR_VM_NODE_STAT_ITEMS;
1797
Michael Rubin79da8262010-10-26 14:21:36 -07001798 global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1799 v + NR_DIRTY_THRESHOLD);
1800 v += NR_VM_WRITEBACK_STAT_ITEMS;
1801
Christoph Lameterf8891e52006-06-30 01:55:45 -07001802#ifdef CONFIG_VM_EVENT_COUNTERS
Michael Rubin79da8262010-10-26 14:21:36 -07001803 all_vm_events(v);
1804 v[PGPGIN] /= 2; /* sectors -> kbytes */
1805 v[PGPGOUT] /= 2;
Christoph Lameterf8891e52006-06-30 01:55:45 -07001806#endif
Wu Fengguangff8b16d2010-11-04 01:56:49 +08001807 return (unsigned long *)m->private + *pos;
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001808}
1809
1810static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1811{
1812 (*pos)++;
Konstantin Khlebnikov9d7ea9a2019-12-04 16:49:50 -08001813 if (*pos >= NR_VMSTAT_ITEMS)
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001814 return NULL;
1815 return (unsigned long *)m->private + *pos;
1816}
1817
1818static int vmstat_show(struct seq_file *m, void *arg)
1819{
1820 unsigned long *l = arg;
1821 unsigned long off = l - (unsigned long *)m->private;
Alexey Dobriyan68ba0322016-10-07 17:02:14 -07001822
1823 seq_puts(m, vmstat_text[off]);
Joe Perches75ba1d02016-10-07 17:02:20 -07001824 seq_put_decimal_ull(m, " ", *l);
Alexey Dobriyan68ba0322016-10-07 17:02:14 -07001825 seq_putc(m, '\n');
NeilBrown8d928902020-06-01 21:48:21 -07001826
1827 if (off == NR_VMSTAT_ITEMS - 1) {
1828 /*
1829 * We've come to the end - add any deprecated counters to avoid
1830 * breaking userspace which might depend on them being present.
1831 */
1832 seq_puts(m, "nr_unstable 0\n");
1833 }
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001834 return 0;
1835}
1836
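/*
 * vmstat_show() prints one "<name> <value>" pair per line, using the
 * vmstat_text[] names in the same order as the groups summed into
 * NR_VMSTAT_ITEMS by vmstat_start(). A hedged, standalone userspace
 * sketch that looks up one counter by name (the helper name is made up;
 * "oom_kill" is one of the event counters listed above and only exists
 * when CONFIG_VM_EVENT_COUNTERS is enabled):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	static int vmstat_lookup(const char *name, unsigned long long *val)
 *	{
 *		char key[128];
 *		unsigned long long v;
 *		FILE *f = fopen("/proc/vmstat", "r");
 *		int ret = -1;
 *
 *		if (!f)
 *			return -1;
 *		while (fscanf(f, "%127s %llu", key, &v) == 2) {
 *			if (!strcmp(key, name)) {
 *				*val = v;
 *				ret = 0;
 *				break;
 *			}
 *		}
 *		fclose(f);
 *		return ret;
 *	}
 *
 *	int main(void)
 *	{
 *		unsigned long long nr;
 *
 *		if (!vmstat_lookup("oom_kill", &nr))
 *			printf("oom_kill %llu\n", nr);
 *		return 0;
 *	}
 */
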
1837static void vmstat_stop(struct seq_file *m, void *arg)
1838{
1839 kfree(m->private);
1840 m->private = NULL;
1841}
1842
Alexey Dobriyanb6aa44a2008-10-06 04:17:48 +04001843static const struct seq_operations vmstat_op = {
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001844 .start = vmstat_start,
1845 .next = vmstat_next,
1846 .stop = vmstat_stop,
1847 .show = vmstat_show,
1848};
Christoph Lameterf6ac2352006-06-30 01:55:32 -07001849#endif /* CONFIG_PROC_FS */
1850
Christoph Lameterdf9ecab2006-08-31 21:27:35 -07001851#ifdef CONFIG_SMP
Christoph Lameterd1187ed2007-05-09 02:35:12 -07001852static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
Christoph Lameter77461ab2007-05-09 02:35:13 -07001853int sysctl_stat_interval __read_mostly = HZ;
Christoph Lameterd1187ed2007-05-09 02:35:12 -07001854
Hugh Dickins52b6f462016-05-19 17:12:50 -07001855#ifdef CONFIG_PROC_FS
1856static void refresh_vm_stats(struct work_struct *work)
1857{
1858 refresh_cpu_vm_stats(true);
1859}
1860
1861int vmstat_refresh(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02001862 void *buffer, size_t *lenp, loff_t *ppos)
Hugh Dickins52b6f462016-05-19 17:12:50 -07001863{
1864 long val;
1865 int err;
1866 int i;
1867
1868 /*
1869 * The regular update, every sysctl_stat_interval, may come later
1870 * than expected: leaving a significant amount in per_cpu buckets.
1871 * This is particularly misleading when checking a quantity of HUGE
1872 * pages, immediately after running a test. /proc/sys/vm/stat_refresh,
1873 * which can equally be echo'ed to or cat'ted from (by root),
1874 * can be used to update the stats just before reading them.
1875 *
Michal Hockoc41f0122017-09-06 16:23:36 -07001876 * Oh, and since global_zone_page_state() etc. are so careful to hide
Hugh Dickins52b6f462016-05-19 17:12:50 -07001877 * transiently negative values, report an error here if any of
1878 * the stats is negative, so we know to go looking for imbalance.
1879 */
1880 err = schedule_on_each_cpu(refresh_vm_stats);
1881 if (err)
1882 return err;
1883 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
Hugh Dickins75083aa2021-05-04 18:37:57 -07001884 /*
1885 * Skip checking stats known to go negative occasionally.
1886 */
1887 switch (i) {
1888 case NR_ZONE_WRITE_PENDING:
1889 case NR_FREE_CMA_PAGES:
1890 continue;
1891 }
Mel Gorman75ef7182016-07-28 15:45:24 -07001892 val = atomic_long_read(&vm_zone_stat[i]);
Hugh Dickins52b6f462016-05-19 17:12:50 -07001893 if (val < 0) {
Johannes Weinerc822f622017-05-03 14:52:10 -07001894 pr_warn("%s: %s %ld\n",
Konstantin Khlebnikov9d7ea9a2019-12-04 16:49:50 -08001895 __func__, zone_stat_name(i), val);
Hugh Dickins52b6f462016-05-19 17:12:50 -07001896 }
1897 }
Hugh Dickins76d8cc32021-05-04 18:37:51 -07001898 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
Hugh Dickins75083aa2021-05-04 18:37:57 -07001899 /*
1900 * Skip checking stats known to go negative occasionally.
1901 */
1902 switch (i) {
1903 case NR_WRITEBACK:
1904 continue;
1905 }
Hugh Dickins76d8cc32021-05-04 18:37:51 -07001906 val = atomic_long_read(&vm_node_stat[i]);
1907 if (val < 0) {
1908 pr_warn("%s: %s %ld\n",
1909 __func__, node_stat_name(i), val);
Hugh Dickins76d8cc32021-05-04 18:37:51 -07001910 }
1911 }
Hugh Dickins52b6f462016-05-19 17:12:50 -07001912 if (write)
1913 *ppos += *lenp;
1914 else
1915 *lenp = 0;
1916 return 0;
1917}
1918#endif /* CONFIG_PROC_FS */
1919
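/*
 * As the comment in vmstat_refresh() notes, /proc/sys/vm/stat_refresh can
 * be written to (or read from) by root to fold the per-cpu diffs right
 * before sampling /proc/vmstat. A hedged, standalone userspace sketch of
 * that sequence, with error handling kept minimal:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/sys/vm/stat_refresh", "w");
 *
 *		if (f) {			// requires root
 *			fputs("1\n", f);	// any access triggers a refresh
 *			fclose(f);
 *		}
 *		f = fopen("/proc/vmstat", "r");
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */
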
Christoph Lameterd1187ed2007-05-09 02:35:12 -07001920static void vmstat_update(struct work_struct *w)
1921{
Christoph Lameter0eb77e92016-01-14 15:21:40 -08001922 if (refresh_cpu_vm_stats(true)) {
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07001923 /*
1924 * Counters were updated so we expect more updates
1925 * to occur in the future. Keep on running the
1926 * update worker thread.
1927 */
Michal Hockoce612872017-04-07 16:05:05 -07001928 queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
Michal Hockof01f17d2016-02-05 15:36:24 -08001929 this_cpu_ptr(&vmstat_work),
1930 round_jiffies_relative(sysctl_stat_interval));
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07001931 }
Christoph Lameterd1187ed2007-05-09 02:35:12 -07001932}
1933
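/*
 * The pattern above - a delayed work item that re-queues itself only while
 * the counters keep changing - is a common kernel idiom. A minimal, hedged
 * sketch with made-up names (my_counters_dirty() is a placeholder standing
 * in for refresh_cpu_vm_stats() reporting a change):
 */
static inline bool my_counters_dirty(void) { return false; }	/* placeholder */

static void my_poll(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(my_work, my_poll);

static void my_poll(struct work_struct *w)
{
	if (my_counters_dirty())
		schedule_delayed_work(&my_work,
				      round_jiffies_relative(HZ));
	/* otherwise stay idle until something requeues my_work */
}
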
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07001934/*
1935 * Check if the diffs for a certain cpu indicate that
1936 * an update is needed.
1937 */
1938static bool need_update(int cpu)
Christoph Lameterd1187ed2007-05-09 02:35:12 -07001939{
Johannes Weiner2bbd00a2021-02-25 17:16:47 -08001940 pg_data_t *last_pgdat = NULL;
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07001941 struct zone *zone;
Christoph Lameterd1187ed2007-05-09 02:35:12 -07001942
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07001943 for_each_populated_zone(zone) {
Mel Gorman28f836b2021-06-28 19:41:38 -07001944 struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
Johannes Weiner2bbd00a2021-02-25 17:16:47 -08001945 struct per_cpu_nodestat *n;
Mel Gorman28f836b2021-06-28 19:41:38 -07001946
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07001947 /*
1948 * The fast way of checking if there are any vmstat diffs.
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07001949 */
Miaohe Lin64632fd2021-09-02 15:01:05 -07001950 if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07001951 return true;
Mel Gormanf19298b2021-06-28 19:41:44 -07001952
Johannes Weiner2bbd00a2021-02-25 17:16:47 -08001953 if (last_pgdat == zone->zone_pgdat)
1954 continue;
1955 last_pgdat = zone->zone_pgdat;
1956 n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
Miaohe Lin64632fd2021-09-02 15:01:05 -07001957 if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
1958 return true;
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07001959 }
1960 return false;
1961}
1962
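/*
 * need_update() relies on memchr_inv(): it returns NULL only when every
 * byte of the buffer equals the given byte, so a single call checks a
 * whole per-cpu diff array for any non-zero entry. A hedged illustration
 * (the wrapper name is made up):
 */
static inline bool diffs_all_zero(const void *buf, size_t len)
{
	return memchr_inv(buf, 0, len) == NULL;	/* every byte is zero */
}
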
Christoph Lameter7b8da4c2016-05-20 16:58:21 -07001963/*
1964 * Switch off vmstat processing and then fold all the remaining differentials
1965 * until the diffs stay at zero. The function is used by NOHZ and can only be
1966 * invoked when tick processing is not active.
1967 */
Michal Hockof01f17d2016-02-05 15:36:24 -08001968void quiet_vmstat(void)
1969{
1970 if (system_state != SYSTEM_RUNNING)
1971 return;
1972
Christoph Lameter7b8da4c2016-05-20 16:58:21 -07001973 if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
Michal Hockof01f17d2016-02-05 15:36:24 -08001974 return;
1975
1976 if (!need_update(smp_processor_id()))
1977 return;
1978
1979 /*
1980 * Just refresh counters and do not care about the pending delayed
1981 * vmstat_update. It doesn't fire that often to matter and canceling
1982 * it would be too expensive from this path.
1983 * vmstat_shepherd will take care about that for us.
1984 */
1985 refresh_cpu_vm_stats(false);
1986}
1987
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07001988/*
1989 * Shepherd work item that checks the per-cpu differentials of
1990 * processors whose vmstat_update workers have been disabled
1991 * because of inactivity, and requeues their work when updates
1992 * are pending.
1993 */
1994static void vmstat_shepherd(struct work_struct *w);
1995
Christoph Lameter0eb77e92016-01-14 15:21:40 -08001996static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07001997
1998static void vmstat_shepherd(struct work_struct *w)
1999{
2000 int cpu;
2001
Sebastian Andrzej Siewior7625ecc2021-08-03 16:16:03 +02002002 cpus_read_lock();
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07002003 /* Check processors whose vmstat worker threads have been disabled */
Christoph Lameter7b8da4c2016-05-20 16:58:21 -07002004 for_each_online_cpu(cpu) {
Michal Hockof01f17d2016-02-05 15:36:24 -08002005 struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07002006
Christoph Lameter7b8da4c2016-05-20 16:58:21 -07002007 if (!delayed_work_pending(dw) && need_update(cpu))
Michal Hockoce612872017-04-07 16:05:05 -07002008 queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
Jiang Biaofbcc8182021-02-25 17:16:54 -08002009
2010 cond_resched();
Michal Hockof01f17d2016-02-05 15:36:24 -08002011 }
Sebastian Andrzej Siewior7625ecc2021-08-03 16:16:03 +02002012 cpus_read_unlock();
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07002013
2014 schedule_delayed_work(&shepherd,
2015 round_jiffies_relative(sysctl_stat_interval));
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07002016}
2017
2018static void __init start_shepherd_timer(void)
2019{
2020 int cpu;
2021
2022 for_each_possible_cpu(cpu)
Michal Hockoccde8bd2016-02-05 15:36:27 -08002023 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07002024 vmstat_update);
2025
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07002026 schedule_delayed_work(&shepherd,
2027 round_jiffies_relative(sysctl_stat_interval));
Christoph Lameterd1187ed2007-05-09 02:35:12 -07002028}
2029
Tim Chen03e86db2016-10-07 17:00:02 -07002030static void __init init_cpu_node_state(void)
2031{
Sebastian Andrzej Siewior4c501322016-11-29 15:51:14 +01002032 int node;
Tim Chen03e86db2016-10-07 17:00:02 -07002033
Sebastian Andrzej Siewior4c501322016-11-29 15:51:14 +01002034 for_each_online_node(node) {
2035 if (cpumask_weight(cpumask_of_node(node)) > 0)
2036 node_set_state(node, N_CPU);
2037 }
Tim Chen03e86db2016-10-07 17:00:02 -07002038}
2039
Sebastian Andrzej Siewior5438da92016-11-29 15:52:21 +01002040static int vmstat_cpu_online(unsigned int cpu)
2041{
2042 refresh_zone_stat_thresholds();
2043 node_set_state(cpu_to_node(cpu), N_CPU);
2044 return 0;
2045}
2046
2047static int vmstat_cpu_down_prep(unsigned int cpu)
2048{
2049 cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
2050 return 0;
2051}
2052
2053static int vmstat_cpu_dead(unsigned int cpu)
Toshi Kani807a1bd2013-11-12 15:08:13 -08002054{
Sebastian Andrzej Siewior4c501322016-11-29 15:51:14 +01002055 const struct cpumask *node_cpus;
Sebastian Andrzej Siewior5438da92016-11-29 15:52:21 +01002056 int node;
Toshi Kani807a1bd2013-11-12 15:08:13 -08002057
Sebastian Andrzej Siewior5438da92016-11-29 15:52:21 +01002058 node = cpu_to_node(cpu);
2059
2060 refresh_zone_stat_thresholds();
Sebastian Andrzej Siewior4c501322016-11-29 15:51:14 +01002061 node_cpus = cpumask_of_node(node);
2062 if (cpumask_weight(node_cpus) > 0)
Sebastian Andrzej Siewior5438da92016-11-29 15:52:21 +01002063 return 0;
Toshi Kani807a1bd2013-11-12 15:08:13 -08002064
2065 node_clear_state(node, N_CPU);
Sebastian Andrzej Siewior5438da92016-11-29 15:52:21 +01002066 return 0;
Toshi Kani807a1bd2013-11-12 15:08:13 -08002067}
2068
Alexey Dobriyan8f32f7e2008-10-06 04:13:52 +04002069#endif
Christoph Lameterdf9ecab2006-08-31 21:27:35 -07002070
Michal Hockoce612872017-04-07 16:05:05 -07002071struct workqueue_struct *mm_percpu_wq;
2072
Michal Hocko597b7302017-03-31 15:11:47 -07002073void __init init_mm_internals(void)
Christoph Lameterdf9ecab2006-08-31 21:27:35 -07002074{
Michal Hockoce612872017-04-07 16:05:05 -07002075 int ret __maybe_unused;
Sebastian Andrzej Siewior5438da92016-11-29 15:52:21 +01002076
Michal Hocko80d136e2017-04-19 09:52:46 +02002077 mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
Michal Hockoce612872017-04-07 16:05:05 -07002078
2079#ifdef CONFIG_SMP
Sebastian Andrzej Siewior5438da92016-11-29 15:52:21 +01002080 ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
2081 NULL, vmstat_cpu_dead);
2082 if (ret < 0)
2083 pr_err("vmstat: failed to register 'dead' hotplug state\n");
2084
2085 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
2086 vmstat_cpu_online,
2087 vmstat_cpu_down_prep);
2088 if (ret < 0)
2089 pr_err("vmstat: failed to register 'online' hotplug state\n");
2090
Sebastian Andrzej Siewior7625ecc2021-08-03 16:16:03 +02002091 cpus_read_lock();
Tim Chen03e86db2016-10-07 17:00:02 -07002092 init_cpu_node_state();
Sebastian Andrzej Siewior7625ecc2021-08-03 16:16:03 +02002093 cpus_read_unlock();
Christoph Lameterd1187ed2007-05-09 02:35:12 -07002094
Christoph Lameter7cc36bb2014-10-09 15:29:43 -07002095 start_shepherd_timer();
Alexey Dobriyan8f32f7e2008-10-06 04:13:52 +04002096#endif
2097#ifdef CONFIG_PROC_FS
Christoph Hellwigfddda2b2018-04-13 19:44:18 +02002098 proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
Michal Hockoabaed012019-11-05 21:16:40 -08002099 proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
Christoph Hellwigfddda2b2018-04-13 19:44:18 +02002100 proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
2101 proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
Alexey Dobriyan8f32f7e2008-10-06 04:13:52 +04002102#endif
Christoph Lameterdf9ecab2006-08-31 21:27:35 -07002103}
Mel Gormand7a57522010-05-24 14:32:25 -07002104
2105#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
Mel Gormand7a57522010-05-24 14:32:25 -07002106
2107/*
2108 * Return an index indicating how much of the available free memory is
2109 * unusable for an allocation of the requested size.
2110 */
2111static int unusable_free_index(unsigned int order,
2112 struct contig_page_info *info)
2113{
2114 /* No free memory is interpreted as all free memory is unusable */
2115 if (info->free_pages == 0)
2116 return 1000;
2117
2118 /*
2119	 * The index is conceptually a value between 0 and 1; it is returned
2120	 * here scaled by 1000, i.e. to three decimal places.
2121 *
2122 * 0 => no fragmentation
2123 * 1 => high fragmentation
2124 */
2125 return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
2126
2127}
2128
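/*
 * Worked example with made-up numbers: for order = 3 (an 8-page request),
 * free_pages = 1000 and free_blocks_suitable = 50, the suitable blocks
 * cover 50 << 3 = 400 pages, so the index is
 * (1000 - 400) * 1000 / 1000 = 600, which unusable_show_print() below
 * prints as "0.600", i.e. 60% of the free memory is unusable for requests
 * of that size.
 */
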
2129static void unusable_show_print(struct seq_file *m,
2130 pg_data_t *pgdat, struct zone *zone)
2131{
2132 unsigned int order;
2133 int index;
2134 struct contig_page_info info;
2135
2136 seq_printf(m, "Node %d, zone %8s ",
2137 pgdat->node_id,
2138 zone->name);
2139 for (order = 0; order < MAX_ORDER; ++order) {
2140 fill_contig_page_info(zone, order, &info);
2141 index = unusable_free_index(order, &info);
2142 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2143 }
2144
2145 seq_putc(m, '\n');
2146}
2147
2148/*
2149 * Display unusable free space index
2150 *
2151 * The unusable free space index measures how much of the available free
2152 * memory cannot be used to satisfy an allocation of a given size and is a
2153 * value between 0 and 1. The higher the value, the more of the free memory
2154 * is unusable and, by implication, the worse the external fragmentation. This
2155 * can be expressed as a percentage by multiplying by 100.
2156 */
2157static int unusable_show(struct seq_file *m, void *arg)
2158{
2159 pg_data_t *pgdat = (pg_data_t *)arg;
2160
2161 /* check memoryless node */
Lai Jiangshana47b53c2012-12-12 13:51:37 -08002162 if (!node_state(pgdat->node_id, N_MEMORY))
Mel Gormand7a57522010-05-24 14:32:25 -07002163 return 0;
2164
Vinayak Menon727c0802017-07-10 15:49:17 -07002165 walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
Mel Gormand7a57522010-05-24 14:32:25 -07002166
2167 return 0;
2168}
2169
Kefeng Wang01a99562020-06-04 16:51:08 -07002170static const struct seq_operations unusable_sops = {
Mel Gormand7a57522010-05-24 14:32:25 -07002171 .start = frag_start,
2172 .next = frag_next,
2173 .stop = frag_stop,
2174 .show = unusable_show,
2175};
2176
Kefeng Wang01a99562020-06-04 16:51:08 -07002177DEFINE_SEQ_ATTRIBUTE(unusable);
Mel Gormand7a57522010-05-24 14:32:25 -07002178
Mel Gormanf1a5ab12010-05-24 14:32:26 -07002179static void extfrag_show_print(struct seq_file *m,
2180 pg_data_t *pgdat, struct zone *zone)
2181{
2182 unsigned int order;
2183 int index;
2184
2185 /* Alloc on stack as interrupts are disabled for zone walk */
2186 struct contig_page_info info;
2187
2188 seq_printf(m, "Node %d, zone %8s ",
2189 pgdat->node_id,
2190 zone->name);
2191 for (order = 0; order < MAX_ORDER; ++order) {
2192 fill_contig_page_info(zone, order, &info);
Mel Gorman56de7262010-05-24 14:32:30 -07002193 index = __fragmentation_index(order, &info);
Mel Gormanf1a5ab12010-05-24 14:32:26 -07002194 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2195 }
2196
2197 seq_putc(m, '\n');
2198}
2199
2200/*
2201 * Display fragmentation index for orders that allocations would fail for
2202 */
2203static int extfrag_show(struct seq_file *m, void *arg)
2204{
2205 pg_data_t *pgdat = (pg_data_t *)arg;
2206
Vinayak Menon727c0802017-07-10 15:49:17 -07002207 walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
Mel Gormanf1a5ab12010-05-24 14:32:26 -07002208
2209 return 0;
2210}
2211
Kefeng Wang01a99562020-06-04 16:51:08 -07002212static const struct seq_operations extfrag_sops = {
Mel Gormanf1a5ab12010-05-24 14:32:26 -07002213 .start = frag_start,
2214 .next = frag_next,
2215 .stop = frag_stop,
2216 .show = extfrag_show,
2217};
2218
Kefeng Wang01a99562020-06-04 16:51:08 -07002219DEFINE_SEQ_ATTRIBUTE(extfrag);
Mel Gormanf1a5ab12010-05-24 14:32:26 -07002220
Mel Gormand7a57522010-05-24 14:32:25 -07002221static int __init extfrag_debug_init(void)
2222{
Sasikantha babubde8bd82012-05-29 15:06:22 -07002223 struct dentry *extfrag_debug_root;
2224
Mel Gormand7a57522010-05-24 14:32:25 -07002225 extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
Mel Gormand7a57522010-05-24 14:32:25 -07002226
Greg Kroah-Hartmand9f79792019-03-05 15:46:09 -08002227 debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
Kefeng Wang01a99562020-06-04 16:51:08 -07002228 &unusable_fops);
Mel Gormand7a57522010-05-24 14:32:25 -07002229
Greg Kroah-Hartmand9f79792019-03-05 15:46:09 -08002230 debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
Kefeng Wang01a99562020-06-04 16:51:08 -07002231 &extfrag_fops);
Mel Gormanf1a5ab12010-05-24 14:32:26 -07002232
Mel Gormand7a57522010-05-24 14:32:25 -07002233 return 0;
2234}
2235
2236module_init(extfrag_debug_init);
2237#endif