#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented. You need to set EMBEDDED
 * to disable VM_EVENT_COUNTERS. Things like procps (vmstat,
 * top, etc.) use /proc/vmstat and depend on these counters.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) xx##_DMA, DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
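
/*
 * Illustrative expansion (not part of the original source): with both
 * CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM enabled, FOR_ALL_ZONES(PGALLOC)
 * expands to
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH
 * i.e. one event counter per zone type; when either option is disabled,
 * the corresponding counter simply drops out of the enum below.
 */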

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
		NR_VM_EVENT_ITEMS
};

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}
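
/*
 * Usage sketch (call sites are illustrative, not taken from this header):
 *
 *	count_vm_event(PGFAULT);		pins the CPU via get_cpu_var()
 *	__count_vm_events(PGPGOUT, nr);		caller already prevents
 *						preemption/migration
 *
 * The __ variants skip the get_cpu()/put_cpu() pair and therefore assume
 * the caller runs with preemption disabled or in interrupt context.
 */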

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
#define get_cpu_vm_events(e)	0L
#define count_vm_event(e)	do { } while (0)
#define count_vm_events(e,d)	do { } while (0)
#define __count_vm_event(e)	do { } while (0)
#define __count_vm_events(e,d)	do { } while (0)
#define vm_events_fold_cpu(x)	do { } while (0)

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_DMA + zone_idx(zone), delta)
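
/*
 * For example (illustrative), __count_zone_vm_events(PGALLOC, zone, 1 << order)
 * becomes __count_vm_events(PGALLOC_DMA + zone_idx(zone), 1 << order): the
 * zone index selects the matching per-zone counter generated by
 * FOR_ALL_ZONES() above, relying on enum zone_type and the counters being
 * laid out in the same order.
 */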

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
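
/*
 * Under SMP the per-CPU differentials may not yet have been folded into the
 * atomic counters, so the summed value can be transiently negative; the
 * readers above clamp it to 0 rather than returning a huge unsigned number.
 */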

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_DMA], item);
}

extern void zone_statistics(struct zonelist *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
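
/*
 * Naming convention (descriptive note): the __ prefixed updaters assume the
 * caller has already disabled interrupts or otherwise serialized access to
 * the per-CPU differentials; the plain variants take care of that themselves
 * before delegating to the __ versions.
 */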

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_dec(&page_zone(page)->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }
#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */