/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * VM counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
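
/*
 * Illustrative usage (not part of this header): most call sites bump an
 * event from whatever context they happen to run in, e.g.
 *
 *	count_vm_event(PGFAULT);
 *
 * The __-prefixed variants use raw_cpu ops and suit callers that already
 * run preemption-disabled, or that can tolerate the races these counters
 * permit anyway, e.g.
 *
 *	__count_vm_events(PGFREE, 1 << order);
 *
 * PGFAULT and PGFREE are existing vm_event_item values; "order" is a
 * hypothetical local variable.
 */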

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */
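
/*
 * Note on the no-op variants here and below: expanding to
 * do { (void)(y); } while (0) keeps the delta argument referenced (and
 * evaluated), so a variable used only as an event delta does not trigger
 * set-but-unused warnings when the config option is off.
 */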

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
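
/*
 * The item##_NORMAL - ZONE_NORMAL + zid arithmetic relies on the per-zone
 * event items being declared in zone order (FOR_ALL_ZONES). For example,
 * __count_zid_vm_events(PGALLOC, ZONE_DMA, 1) expands to
 * __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA, 1), which
 * bumps PGALLOC_DMA.
 */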

/*
 * Zone- and node-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

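/*
 * The global and per-zone totals below can transiently read negative on
 * SMP: per-CPU deltas are folded into the atomic counters lazily, so a
 * decrement may be folded in before the matching increment. Readers
 * therefore clamp to zero rather than report a negative page count.
 */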
static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all CPUs to find the current
 * deltas. There is no synchronization, so the result cannot be exactly
 * accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
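
/*
 * Rough trade-off (an editorial note, not a rule from this header):
 * zone_page_state() is a single atomic read and suits fast paths, while
 * the _snapshot() variants walk every online CPU and belong in slow
 * paths, e.g. zone_watermark_ok_safe(), where pending per-CPU deltas
 * matter more than the extra cost.
 */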

static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
					enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))

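/*
 * Convention for the modifiers below: the __-prefixed variants assume the
 * caller already prevents reentrancy (interrupts disabled, or a context
 * that cannot be preempted); the unprefixed variants are safe from any
 * context at slightly higher cost.
 */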
#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

/* Take a long delta, matching the SMP prototype above (was int). */
static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, long delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
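
/*
 * Illustrative (hypothetical) call site: when returning 1 << order pages
 * to the free lists, an allocator path can do
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 *
 * so that NR_FREE_CMA_PAGES stays in step with NR_FREE_PAGES whenever the
 * pages sit in a CMA pageblock.
 */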

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */