/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>

extern int sysctl_stat_interval;

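/*
 * NUMA event statistics can be switched on and off at runtime (the
 * vm.numa_stat sysctl); vm_numa_stat_key is declared default-true so
 * the common enabled case costs only a statically patched branch.
 */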
#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT   1
#define DISABLE_NUMA_STAT  0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
extern int sysctl_vm_numa_stat_handler(struct ctl_table *table,
		int write, void __user *buffer, size_t *length, loff_t *ppos);
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
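
/*
 * Example (illustrative): a page fault path bumps the PGFAULT event
 * with count_vm_event(PGFAULT); the __ variants use raw_cpu ops, which
 * may lose an occasional update but skip the irq-safety overhead.
 */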

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)      count_vm_event(x)
#define count_vm_numa_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)     do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
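
/*
 * Example (illustrative): __count_zid_vm_events(PGALLOC, zid, 1 << order)
 * works because the per-zone PGALLOC_* items are declared in zone order,
 * so the item for zone id "zid" sits at item##_NORMAL - ZONE_NORMAL + zid.
 */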

/*
 * Zone and node-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

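/*
 * Like global_numa_state(), but restricted to one zone and with the
 * not-yet-folded per-cpu deltas added in; unsynchronized, so only
 * approximately accurate.
 */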
static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

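/*
 * Under SMP the per-cpu deltas are folded into the global counters only
 * periodically, so a reader can observe a transiently negative sum;
 * clamp to zero rather than return nonsense.
 */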
static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so the result cannot be exactly
 * accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))
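
/*
 * These are thin wrappers; e.g. (illustrative)
 * sub_zone_page_state(zone, NR_FREE_PAGES, n) is exactly
 * mod_zone_page_state(zone, NR_FREE_PAGES, -(n)).
 */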

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);
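
/*
 * Example (illustrative): code inserting nr pages into a node's page
 * cache could account for them with
 *	mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
 */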

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, long delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif	/* CONFIG_SMP */

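/*
 * CMA pages on the free list are tracked in both NR_FREE_PAGES and
 * NR_FREE_CMA_PAGES, so the allocator can tell how much of the free
 * memory only serves movable allocations.
 */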
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */