/*
 * Quicklist support.
 *
 * Quicklists are light weight lists of pages that have a defined state
 * on alloc and free. Pages must be in the quicklist specific defined state
 * (zero by default) when the page is freed. It seems that the initial idea
 * for such lists first came from Dave Miller and then various other people
 * improved on it.
 *
 * Copyright (C) 2007 SGI,
 *	Christoph Lameter <clameter@sgi.com>
 *		Generalized, added support for multiple lists and
 *		constructors / destructors.
 */
| 15 | #include <linux/kernel.h> |
| 16 | |
| 17 | #include <linux/mm.h> |
| 18 | #include <linux/mmzone.h> |
| 19 | #include <linux/module.h> |
| 20 | #include <linux/quicklist.h> |
| 21 | |
/*
 * Per-CPU array of quicklists; callers select one of the
 * CONFIG_NR_QUICK lists by index (the 'nr' argument of the
 * quicklist_* functions).
 */
DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];

/*
 * A quicklist may cache at most 1/16th of a node's free pages;
 * max_pages() further divides that budget among the node's CPUs.
 */
#define FRACTION_OF_NODE_MEM	16
| 25 | |
/*
 * Upper bound on the number of pages this CPU's quicklist may cache.
 *
 * The budget is 1/FRACTION_OF_NODE_MEM of the free pages on the local
 * node, split evenly among the CPUs of that node, but never less than
 * min_pages.
 */
static unsigned long max_pages(unsigned long min_pages)
{
	unsigned long node_free_pages, max;
	int node = numa_node_id();
	struct zone *zones = NODE_DATA(node)->node_zones;
	int num_cpus_on_node;
	/* Declares cpumask_on_node pointing at this node's CPU mask. */
	node_to_cpumask_ptr(cpumask_on_node, node);

	/*
	 * Sum the free pages of the zones that can back quicklist
	 * allocations; DMA/DMA32 only exist on some configurations,
	 * hence the #ifdef continuation of the expression.
	 */
	node_free_pages =
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);

	max = node_free_pages / FRACTION_OF_NODE_MEM;

	/*
	 * Divide the per-node budget by the number of CPUs on the node.
	 * NOTE(review): assumes the node has at least one CPU in its
	 * mask (division by zero otherwise) — presumably guaranteed for
	 * the node the caller is running on; confirm for memoryless/
	 * cpuless node configurations.
	 */
	num_cpus_on_node = cpus_weight_nr(*cpumask_on_node);
	max /= num_cpus_on_node;

	return max(max, min_pages);
}
| 50 | |
| 51 | static long min_pages_to_free(struct quicklist *q, |
| 52 | unsigned long min_pages, long max_free) |
| 53 | { |
| 54 | long pages_to_free; |
| 55 | |
| 56 | pages_to_free = q->nr_pages - max_pages(min_pages); |
| 57 | |
| 58 | return min(pages_to_free, max_free); |
| 59 | } |
| 60 | |
| 61 | /* |
| 62 | * Trim down the number of pages in the quicklist |
| 63 | */ |
| 64 | void quicklist_trim(int nr, void (*dtor)(void *), |
| 65 | unsigned long min_pages, unsigned long max_free) |
| 66 | { |
| 67 | long pages_to_free; |
| 68 | struct quicklist *q; |
| 69 | |
| 70 | q = &get_cpu_var(quicklist)[nr]; |
| 71 | if (q->nr_pages > min_pages) { |
| 72 | pages_to_free = min_pages_to_free(q, min_pages, max_free); |
| 73 | |
| 74 | while (pages_to_free > 0) { |
| 75 | /* |
| 76 | * We pass a gfp_t of 0 to quicklist_alloc here |
| 77 | * because we will never call into the page allocator. |
| 78 | */ |
| 79 | void *p = quicklist_alloc(nr, 0, NULL); |
| 80 | |
| 81 | if (dtor) |
| 82 | dtor(p); |
| 83 | free_page((unsigned long)p); |
| 84 | pages_to_free--; |
| 85 | } |
| 86 | } |
| 87 | put_cpu_var(quicklist); |
| 88 | } |
| 89 | |
| 90 | unsigned long quicklist_total_size(void) |
| 91 | { |
| 92 | unsigned long count = 0; |
| 93 | int cpu; |
| 94 | struct quicklist *ql, *q; |
| 95 | |
| 96 | for_each_online_cpu(cpu) { |
| 97 | ql = per_cpu(quicklist, cpu); |
| 98 | for (q = ql; q < ql + CONFIG_NR_QUICK; q++) |
| 99 | count += q->nr_pages; |
| 100 | } |
| 101 | return count; |
| 102 | } |
| 103 | |