/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
	int cpu = 0;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	cpu = first_cpu(*cpumask);
	while (cpu < NR_CPUS) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		cpu = next_cpu(cpu, *cpumask);

		if (cpu < NR_CPUS)
			prefetch(&per_cpu(vm_event_states, cpu));

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	sum_vm_events(ret, &cpu_online_map);
}
EXPORT_SYMBOL_GPL(all_vm_events);
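
/*
 * Illustrative use of all_vm_events() (a sketch, not code from this
 * file): a caller wanting a snapshot of every event counter fills a
 * local array and indexes it with the vm_event_item enum:
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	printk(KERN_DEBUG "pgfault so far: %lu\n", events[PGFAULT]);
 *
 * vmstat_start() below uses the same pattern to fill /proc/vmstat.
 */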

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This adds the events of the offlined processor to the counters
 * of the processor we are running on, so the global sums stay
 * constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates
	 * for longer; more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
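
/*
 * Worked example (illustrative numbers, not taken from a real machine):
 * on a box with two online CPUs and a 1 GB zone with PAGE_SHIFT == 12,
 *
 *	mem       = (1 GB / 4 KB pages) >> (27 - 12) = 262144 >> 15 = 8
 *	threshold = 2 * fls(2) * (1 + fls(8)) = 2 * 2 * (1 + 4)     = 20
 *
 * which matches the "1-2 GB, 2 processors" row of the table above and
 * stays well under the cap of 125.
 */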

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_zone(zone) {

		if (!zone->present_pages)
			continue;

		threshold = calculate_threshold(zone);

		for_each_online_cpu(cpu)
			zone_pcp(zone, cpu)->stat_threshold = threshold;
	}
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;
	long x;

	x = delta + *p;

	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);

/*
 * For use when the interrupt state is unknown.
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
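
/*
 * Usage sketch (illustrative, with a hypothetical page pointer):
 * callers that already run with interrupts off - e.g. under a zone
 * lock - use the __ variant directly and skip the flags save/restore:
 *
 *	__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -1);
 *
 * Anywhere else, mod_zone_page_state() must be used so that an
 * interrupt updating the same counter cannot corrupt the per-cpu
 * differential.
 */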

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)++;

	if (unlikely(*p > pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p + overstep, zone, item);
		*p = -overstep;
	}
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);
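
/*
 * Overstep walkthrough (illustrative numbers): assume stat_threshold
 * is 32 and the per-cpu diff *p has just been incremented to 33. Then
 * overstep = 16, so 33 + 16 = 49 is folded into the global counter
 * and *p is set to -16. The pending -16 differential cancels the 16
 * extra pages counted globally, and the next ~48 increments on this
 * cpu proceed without touching the shared cacheline. Overstepping in
 * the direction of the update halves the fold frequency for counters
 * that keep moving the same way.
 */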

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)--;

	if (unlikely(*p < -pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p - overstep, zone, item);
		*p = overstep;
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 *
 * Note that refresh_cpu_vm_stats strives to access only node-local
 * memory. The per cpu pagesets on remote zones are placed in the
 * memory local to the processor using that pageset. So the loop over
 * all zones will access a series of cachelines local to the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These can cause remote node cache line
 * bouncing and should therefore be done only when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	unsigned long flags;

	for_each_zone(zone) {
		struct per_cpu_pageset *p;

		if (!populated_zone(zone))
			continue;

		p = zone_pcp(zone, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				local_irq_save(flags);
				zone_page_state_add(p->vm_stat_diff[i],
					zone, i);
				p->vm_stat_diff[i] = 0;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				p->expire = 3;
#endif
				local_irq_restore(flags);
			}
#ifdef CONFIG_NUMA
		/*
		 * Deal with draining the remote pageset of this
		 * processor.
		 *
		 * Check if there are pages remaining in this pageset;
		 * if not then there is nothing to expire.
		 */
		if (!p->expire || (!p->pcp[0].count && !p->pcp[1].count))
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			p->expire = 0;
			continue;
		}

		p->expire--;
		if (p->expire)
			continue;

		if (p->pcp[0].count)
			drain_zone_pages(zone, p->pcp + 0);

		if (p->pcp[1].count)
			drain_zone_pages(zone, p->pcp + 1);
#endif
	}
}
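
/*
 * Expiry timeline sketch (illustrative, assuming the default
 * sysctl_stat_interval of one second): a remote pageset that stops
 * seeing stat updates keeps the expire value of 3 left by the last
 * fold. The next vmstat_update() passes decrement it 3 -> 2 -> 1 -> 0,
 * so pages cached for a remote node are handed back via
 * drain_zone_pages() after roughly three idle seconds.
 */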

static void __refresh_cpu_vm_stats(void *dummy)
{
	refresh_cpu_vm_stats(smp_processor_id());
}

/*
 * Consolidate all counters.
 *
 * Note that refreshing makes the result less inaccurate, but it
 * remains approximate while concurrent processes are allowed to run.
 */
void refresh_vm_stats(void)
{
	on_each_cpu(__refresh_cpu_vm_stats, NULL, 0, 1);
}
EXPORT_SYMBOL(refresh_vm_stats);

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z	    = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
	if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
	}
	if (z->node == numa_node_id())
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
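
/*
 * Scenario sketch (illustrative): a task on node 1 allocates with a
 * zonelist whose preferred zone lives on node 0, and the page is
 * actually served from a node 1 zone. The serving zone then counts a
 * NUMA_MISS, the preferred node 0 zone counts a NUMA_FOREIGN, and,
 * because the serving zone is on the allocating cpu's own node, it
 * also counts a NUMA_LOCAL. A hit on the preferred node would instead
 * count NUMA_HIT plus NUMA_LOCAL or NUMA_OTHER.
 */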
#endif

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;
	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;
	int order;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
		for (order = 0; order < MAX_ORDER; ++order)
			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}
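
/*
 * Example output line (counts illustrative) as it appears in
 * /proc/buddyinfo, which is backed by fragmentation_op below:
 *
 *	Node 0, zone   Normal    216     55    189    101 ...
 *
 * There is one column per free-list order, so trailing columns
 * dropping to zero is the classic sign that higher-order pages have
 * fragmented away.
 */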

const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx)
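
/*
 * Expansion example: with CONFIG_ZONE_DMA and CONFIG_HIGHMEM set but
 * CONFIG_ZONE_DMA32 unset, TEXTS_FOR_ZONES("pgalloc") expands to
 *
 *	"pgalloc_dma", "pgalloc_normal", "pgalloc_high",
 *
 * so the name list below always lines up with the per-zone event
 * counters that are actually compiled in.
 */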

static const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_free_pages",
	"nr_active",
	"nr_inactive",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
#endif
};

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
		int i;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
		seq_printf(m,
			   "\n  pages free     %lu"
			   "\n        min      %lu"
			   "\n        low      %lu"
			   "\n        high     %lu"
			   "\n        scanned  %lu (a: %lu i: %lu)"
			   "\n        spanned  %lu"
			   "\n        present  %lu",
			   zone_page_state(zone, NR_FREE_PAGES),
			   zone->pages_min,
			   zone->pages_low,
			   zone->pages_high,
			   zone->pages_scanned,
			   zone->nr_scan_active, zone->nr_scan_inactive,
			   zone->spanned_pages,
			   zone->present_pages);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
					zone_page_state(zone, i));

		seq_printf(m,
			   "\n        protection: (%lu",
			   zone->lowmem_reserve[0]);
		for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
			seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
		seq_printf(m,
			   ")"
			   "\n  pagesets");
		for_each_online_cpu(i) {
			struct per_cpu_pageset *pageset;
			int j;

			pageset = zone_pcp(zone, i);
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				seq_printf(m,
					   "\n    cpu: %i pcp: %i"
					   "\n              count: %i"
					   "\n              high:  %i"
					   "\n              batch: %i",
					   i, j,
					   pageset->pcp[j].count,
					   pageset->pcp[j].high,
					   pageset->pcp[j].batch);
			}
#ifdef CONFIG_SMP
			seq_printf(m, "\n  vm stats threshold: %d",
					pageset->stat_threshold);
#endif
		}
		seq_printf(m,
			   "\n  all_unreclaimable: %u"
			   "\n  prev_priority:     %i"
			   "\n  start_pfn:         %lu",
			   zone->all_unreclaimable,
			   zone->prev_priority,
			   zone->zone_start_pfn);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}

const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}
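
/*
 * Resulting /proc/vmstat format, one "name value" pair per line
 * (values illustrative):
 *
 *	nr_free_pages 81254
 *	nr_active 162317
 *	...
 *	pgpgin 2181567
 *
 * The whole snapshot is taken once in vmstat_start(); vmstat_next()
 * and vmstat_show() only walk the saved array, so a single read sees
 * one snapshot rather than re-sampling counters per line.
 */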

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
	refresh_cpu_vm_stats(smp_processor_id());
	schedule_delayed_work(&__get_cpu_var(vmstat_work),
		sysctl_stat_interval);
}

static void __devinit start_cpu_timer(int cpu)
{
	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
	schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
}
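
/*
 * The HZ + cpu initial delay staggers the per-cpu timers: cpu 0 first
 * fires after HZ jiffies, cpu 1 one jiffy later, and so on, so the
 * stat-folding work does not run on every cpu in the same tick. The
 * deferrable work additionally lets an idle cpu sleep through the
 * deadline instead of waking up just to fold counters.
 */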

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
		per_cpu(vmstat_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };

int __init setup_vmstat(void)
{
	int cpu;

	refresh_zone_stat_thresholds();
	register_cpu_notifier(&vmstat_notifier);

	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
	return 0;
}
module_init(setup_vmstat)
#endif