#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/config.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

/*
 * Global page accounting. One instance per CPU. Only unsigned longs are
 * allowed.
 *
 * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
 *   any time safely (which protects the instance from modification by
 *   interrupt).
 * - The __xxx_page_state variants can be used safely when interrupts are
 *   disabled.
 * - The __xxx_page_state variants can also be used if the field is only
 *   modified from process context and protected from preemption, or only
 *   modified from interrupt context. In this case, the field should be
 *   commented here.
 */
struct page_state {
	unsigned long nr_dirty;		/* Dirty writeable pages */
	unsigned long nr_writeback;	/* Pages under writeback */
	unsigned long nr_unstable;	/* NFS unstable pages */
	unsigned long nr_page_table_pages; /* Pages used for pagetables */
	unsigned long nr_mapped;	/* mapped into pagetables.
					 * only modified from process context */
	unsigned long nr_slab;		/* In slab */
#define GET_PAGE_STATE_LAST nr_slab

	/*
	 * The below are zeroed by get_page_state(). Use get_full_page_state()
	 * to add up all these.
	 */
	unsigned long pgpgin;		/* Disk reads */
	unsigned long pgpgout;		/* Disk writes */
	unsigned long pswpin;		/* swap reads */
	unsigned long pswpout;		/* swap writes */

	unsigned long pgalloc_high;	/* page allocations */
	unsigned long pgalloc_normal;
	unsigned long pgalloc_dma32;
	unsigned long pgalloc_dma;

	unsigned long pgfree;		/* page freeings */
	unsigned long pgactivate;	/* pages moved inactive->active */
	unsigned long pgdeactivate;	/* pages moved active->inactive */

	unsigned long pgfault;		/* faults (major+minor) */
	unsigned long pgmajfault;	/* faults (major only) */

	unsigned long pgrefill_high;	/* inspected in refill_inactive_zone */
	unsigned long pgrefill_normal;
	unsigned long pgrefill_dma32;
	unsigned long pgrefill_dma;

	unsigned long pgsteal_high;	/* total highmem pages reclaimed */
	unsigned long pgsteal_normal;
	unsigned long pgsteal_dma32;
	unsigned long pgsteal_dma;

	unsigned long pgscan_kswapd_high; /* total highmem pages scanned */
	unsigned long pgscan_kswapd_normal;
	unsigned long pgscan_kswapd_dma32;
	unsigned long pgscan_kswapd_dma;

	unsigned long pgscan_direct_high; /* total highmem pages scanned */
	unsigned long pgscan_direct_normal;
	unsigned long pgscan_direct_dma32;
	unsigned long pgscan_direct_dma;

	unsigned long pginodesteal;	/* pages reclaimed via inode freeing */
	unsigned long slabs_scanned;	/* slab objects scanned */
	unsigned long kswapd_steal;	/* pages reclaimed by kswapd */
	unsigned long kswapd_inodesteal; /* reclaimed via kswapd inode freeing */
	unsigned long pageoutrun;	/* kswapd's calls to page reclaim */
	unsigned long allocstall;	/* direct reclaim calls */

	unsigned long pgrotated;	/* pages rotated to tail of the LRU */
	unsigned long nr_bounce;	/* pages for bounce buffers */
};

extern void get_page_state(struct page_state *ret);
extern void get_page_state_node(struct page_state *ret, int node);
extern void get_full_page_state(struct page_state *ret);
extern unsigned long read_page_state_offset(unsigned long offset);
extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
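
/*
 * The accessors above address individual counters by their byte offset
 * within struct page_state. Illustrative sketch only (hypothetical
 * caller, not part of this interface):
 *
 *	unsigned long dirty =
 *		read_page_state_offset(offsetof(struct page_state, nr_dirty));
 *
 * which is exactly what the read_page_state() wrapper below produces.
 */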

#define read_page_state(member) \
	read_page_state_offset(offsetof(struct page_state, member))

#define mod_page_state(member, delta)	\
	mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define __mod_page_state(member, delta)	\
	__mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define inc_page_state(member)		mod_page_state(member, 1UL)
#define dec_page_state(member)		mod_page_state(member, 0UL - 1)
#define add_page_state(member, delta)	mod_page_state(member, (delta))
#define sub_page_state(member, delta)	mod_page_state(member, 0UL - (delta))

#define __inc_page_state(member)	__mod_page_state(member, 1UL)
#define __dec_page_state(member)	__mod_page_state(member, 0UL - 1)
#define __add_page_state(member, delta)	__mod_page_state(member, (delta))
#define __sub_page_state(member, delta)	__mod_page_state(member, 0UL - (delta))
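
/*
 * Usage sketch (hypothetical call sites, for illustration): a fault
 * handler might bump the per-cpu fault counter with
 *
 *	inc_page_state(pgfault);	(safe in any context)
 *	__inc_page_state(pgfault);	(caller already has interrupts off)
 *
 * Both expand to a mod_page_state_offset() call with the offset of the
 * named struct page_state member.
 */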

#define page_state(member) (*__page_state(offsetof(struct page_state, member)))

#define state_zone_offset(zone, member)					\
({									\
	unsigned offset;						\
	if (is_highmem(zone))						\
		offset = offsetof(struct page_state, member##_high);	\
	else if (is_normal(zone))					\
		offset = offsetof(struct page_state, member##_normal);	\
	else if (is_dma32(zone))					\
		offset = offsetof(struct page_state, member##_dma32);	\
	else								\
		offset = offsetof(struct page_state, member##_dma);	\
	offset;								\
})
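
/*
 * state_zone_offset() selects among the per-zone members of struct
 * page_state by token pasting. For illustration, on a highmem zone
 * state_zone_offset(zone, pgsteal) evaluates to
 * offsetof(struct page_state, pgsteal_high).
 */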

#define __mod_page_state_zone(zone, member, delta)			\
do {									\
	__mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)

#define mod_page_state_zone(zone, member, delta)			\
do {									\
	mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)

DECLARE_PER_CPU(struct page_state, page_states);

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
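
/*
 * Illustrative sketch only: zone_page_state_add() folds one delta into
 * both the per-zone and the global counter, e.g. (assuming some item
 * FOO exists in enum zone_stat_item):
 *
 *	zone_page_state_add(1, zone, FOO);
 *
 * so the global array stays the sum of the per-zone counters.
 */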

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
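
/*
 * Under SMP the per cpu differentials may not yet have been folded back
 * into the atomic counters, so a read can transiently see a negative
 * sum. The readers above therefore clamp negative values to zero.
 */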

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifndef CONFIG_DMA_IS_NORMAL
#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_DMA], item);
}
#else
#define node_page_state(node, item) global_page_state(item)
#endif
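
/*
 * For illustration: on a typical 64 bit NUMA configuration without
 * highmem, the conditional expression above reduces to
 *
 *	zone_page_state(&zones[ZONE_DMA32], item) +
 *	zone_page_state(&zones[ZONE_NORMAL], item) +
 *	zone_page_state(&zones[ZONE_DMA], item);
 *
 * i.e. the node value is simply the sum over the node's zones.
 */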

#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_inc(&page_zone(page)->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_dec(&page_zone(page)->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }
#endif
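
/*
 * Usage sketch (hypothetical call site, for illustration; FOO stands in
 * for some item in enum zone_stat_item): accounting one page with
 *
 *	inc_zone_page_state(page, FOO);
 *
 * buffers the increment in a per cpu differential on SMP, while on UP it
 * updates the zone and global atomic counters directly, as above.
 */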

#endif /* _LINUX_VMSTAT_H */