/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT 1
#define DISABLE_NUMA_STAT 0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
extern int sysctl_vm_numa_stat_handler(struct ctl_table *table,
		int write, void __user *buffer, size_t *length, loff_t *ppos);
#endif

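/*
 * Counters describing the outcome of a single page-reclaim pass, filled
 * in by shrink_page_list() in mm/vmscan.c. nr_activate[] is indexed by
 * LRU type (anon = 0, file = 1).
 */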
struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_activate[2];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
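
/*
 * A usage sketch (PGFAULT is one of the items declared in
 * <linux/vm_event_item.h>):
 *
 *	count_vm_event(PGFAULT);		preemption-safe this_cpu op
 *	__count_vm_events(PGSCAN_KSWAPD, nr);	raw op; the caller either
 *						runs with preemption
 *						disabled or tolerates a
 *						rare lost update
 */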

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x) count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x) count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
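/*
 * Example: assuming the usual FOR_ALL_ZONES() item layout in
 * <linux/vm_event_item.h>, __count_zid_vm_events(PGALLOC, ZONE_DMA, 1)
 * expands to __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA, 1),
 * i.e. PGALLOC_DMA. The per-zone event items must be declared in zone
 * order for this arithmetic to hold.
 */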

/*
 * Zone and node-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
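
/*
 * These arrays hold the machine-wide totals. Per-CPU deltas accumulate
 * in each zone's pageset / node's per-CPU stats and are folded into the
 * globals by the vmstat worker in mm/vmstat.c and by cpu_vm_stats_fold()
 * when a CPU goes offline, so a plain read may lag behind by up to the
 * per-CPU stat threshold on every CPU.
 */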

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}
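
/*
 * Unlike the zone/node page state readers below, no negative clamp is
 * applied here: the NUMA items record allocation hit/miss events rather
 * than object populations, and the pending per-CPU NUMA diffs are
 * unsigned, so the sum does not transiently go negative.
 */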

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also takes the currently pending per-CPU
 * deltas into account. This requires looping over all CPUs; since there
 * is no synchronization, the result is still only approximate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
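
/*
 * The snapshot variant is for slow paths where the cheap reader above
 * may be too stale; e.g. zone_watermark_ok_safe() in mm/page_alloc.c
 * switches to zone_page_state_snapshot(zone, NR_FREE_PAGES) once free
 * pages fall below the zone's percpu_drift_mark.
 */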

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */
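
/*
 * Note the asymmetry: sum_zone_node_page_state() folds a zone_stat_item
 * over all zones of a node, whereas node_page_state() reads a
 * node_stat_item maintained per node directly. On !CONFIG_NUMA both
 * collapse to the corresponding global readers.
 */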

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
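
/*
 * The two threshold calculators trade accuracy for performance: while
 * kswapd is reclaiming, set_pgdat_percpu_threshold() installs the
 * smaller pressure threshold so per-CPU drift cannot hide the true
 * NR_FREE_PAGES from watermark checks, and the cheaper normal threshold
 * is restored afterwards.
 */
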
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, long delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
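
/*
 * Free CMA pages are accounted in both counters: NR_FREE_CMA_PAGES is a
 * subset of NR_FREE_PAGES, which lets watermark checks subtract the CMA
 * share for allocations that cannot use CMA pageblocks.
 */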

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */