/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT	1
#define DISABLE_NUMA_STAT	0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
extern int sysctl_vm_numa_stat_handler(struct ctl_table *table,
		int write, void __user *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_activate[2];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */
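
/*
 * Illustrative usage sketch: vmstat_example_count_fault() is a hypothetical
 * helper, not part of the kernel API.  Callers simply bump an event counter;
 * the __count_* variants use raw_cpu ops and suit contexts that already
 * tolerate (or exclude) the resulting races, while the count_* variants use
 * this_cpu ops and are the safe default.
 */
static inline void vmstat_example_count_fault(bool major)
{
	count_vm_event(PGFAULT);		/* per-cpu increment of one event */
	if (major)
		count_vm_events(PGMAJFAULT, 1);	/* add an explicit delta */
}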

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)		count_vm_event(x)
#define count_vm_numa_events(x, y)	count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)		count_vm_event(x)
#define count_vm_tlb_events(x, y)	count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
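
/*
 * Illustrative sketch: vmstat_example_count_alloc() is a hypothetical helper.
 * With zid == ZONE_MOVABLE, __count_zid_vm_events(PGALLOC, zid, 1) expands to
 * __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_MOVABLE, 1), i.e. it
 * bumps the per-zone PGALLOC_MOVABLE event.
 */
static inline void vmstat_example_count_alloc(struct zone *zone)
{
	__count_zid_vm_events(PGALLOC, zone_idx(zone), 1);
}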

/*
 * Zone- and node-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so the result is still not
 * exactly accurate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
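
/*
 * Illustrative sketch: vmstat_example_free_pages() is a hypothetical helper.
 * The snapshot variant folds in the pending per-cpu deltas and is what a
 * caller operating close to a watermark might prefer, at the cost of a loop
 * over the online CPUs; the plain read is cheaper and good enough for
 * heuristics.
 */
static inline unsigned long vmstat_example_free_pages(struct zone *zone,
						      bool precise)
{
	return precise ? zone_page_state_snapshot(zone, NR_FREE_PAGES) :
			 zone_page_state(zone, NR_FREE_PAGES);
}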

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif	/* CONFIG_SMP */
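
/*
 * Illustrative sketch: vmstat_example_account_file_pages() is a hypothetical
 * helper.  Regardless of configuration, callers go through the same
 * mod/inc/dec interface: with CONFIG_SMP the update lands in a per-cpu
 * differential that is folded back into the atomic counters later, without
 * it the atomic counters are modified directly.  The __* variants assume the
 * caller has already dealt with preemption/interrupt context.
 */
static inline void vmstat_example_account_file_pages(struct pglist_data *pgdat,
						     long nr_pages)
{
	mod_node_page_state(pgdat, NR_FILE_PAGES, nr_pages);
}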

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */