#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>

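/* Initialize the page_cgroup that tracks the page at @pfn. */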
static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
        pc->flags = 0;
        pc->mem_cgroup = NULL;
        pc->page = pfn_to_page(pfn);
        INIT_LIST_HEAD(&pc->lru);
}

static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
        pgdat->node_page_cgroup = NULL;
}

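/*
 * FLATMEM: each node keeps a single page_cgroup array covering its whole
 * pfn range, so lookup is just an offset from node_start_pfn.
 */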
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        unsigned long offset;
        struct page_cgroup *base;

        base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
        if (unlikely(!base))
                return NULL;

        offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
        return base + offset;
}

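/* Allocate and initialize one node's page_cgroup table from bootmem. */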
static int __init alloc_node_page_cgroup(int nid)
{
        struct page_cgroup *base, *pc;
        unsigned long table_size;
        unsigned long start_pfn, nr_pages, index;

        start_pfn = NODE_DATA(nid)->node_start_pfn;
        nr_pages = NODE_DATA(nid)->node_spanned_pages;

        if (!nr_pages)
                return 0;

        table_size = sizeof(struct page_cgroup) * nr_pages;

        base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
                        table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
        if (!base)
                return -ENOMEM;
        for (index = 0; index < nr_pages; index++) {
                pc = base + index;
                __init_page_cgroup(pc, start_pfn + index);
        }
        NODE_DATA(nid)->node_page_cgroup = base;
        total_usage += table_size;
        return 0;
}

void __init page_cgroup_init_flatmem(void)
{
        int nid, fail;

        if (mem_cgroup_disabled())
                return;

        for_each_online_node(nid) {
                fail = alloc_node_page_cgroup(nid);
                if (fail)
                        goto fail;
        }
        printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
        printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
                         " don't want memory cgroups\n");
        return;
fail:
        printk(KERN_CRIT "allocation of page_cgroup failed.\n");
        printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
        panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

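/*
 * SPARSEMEM: each mem_section carries its own page_cgroup array. The
 * pointer is stored pre-offset by the section's start pfn (see
 * init_section_page_cgroup() below), so adding the raw pfn indexes it
 * directly.
 */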
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        struct mem_section *section = __pfn_to_section(pfn);

        if (!section->page_cgroup)
                return NULL;
        return section->page_cgroup + pfn;
}

/* __alloc_bootmem...() is only safe before slab_is_available() */
static int __init_refok init_section_page_cgroup(unsigned long pfn)
{
        struct mem_section *section = __pfn_to_section(pfn);
        struct page_cgroup *base, *pc;
        unsigned long table_size;
        int nid, index;

        if (!section->page_cgroup) {
                nid = page_to_nid(pfn_to_page(pfn));
                table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
                VM_BUG_ON(!slab_is_available());
                if (node_state(nid, N_HIGH_MEMORY)) {
                        base = kmalloc_node(table_size,
                                            GFP_KERNEL | __GFP_NOWARN, nid);
                        if (!base)
                                base = vmalloc_node(table_size, nid);
                } else {
                        base = kmalloc(table_size, GFP_KERNEL | __GFP_NOWARN);
                        if (!base)
                                base = vmalloc(table_size);
                }
        } else {
                /*
                 * We don't have to allocate page_cgroup again, but the
                 * address of the memmap may have changed, so we have to
                 * reinitialize it.
                 */
                base = section->page_cgroup + pfn;
                table_size = 0;
                /* check whether the address of the memmap has changed */
                if (base->page == pfn_to_page(pfn))
                        return 0;
        }

        if (!base) {
                printk(KERN_ERR "page cgroup allocation failure\n");
                return -ENOMEM;
        }

        for (index = 0; index < PAGES_PER_SECTION; index++) {
                pc = base + index;
                __init_page_cgroup(pc, pfn + index);
        }

        section->page_cgroup = base - pfn;
        total_usage += table_size;
        return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
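/*
 * Free one section's page_cgroup table, matching the allocator that
 * produced it. Bootmem-allocated tables (reserved pages) are left alone.
 */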
void __free_page_cgroup(unsigned long pfn)
{
        struct mem_section *ms;
        struct page_cgroup *base;

        ms = __pfn_to_section(pfn);
        if (!ms || !ms->page_cgroup)
                return;
        base = ms->page_cgroup + pfn;
        if (is_vmalloc_addr(base)) {
                vfree(base);
                ms->page_cgroup = NULL;
        } else {
                struct page *page = virt_to_page(base);
                if (!PageReserved(page)) { /* Is bootmem ? */
                        kfree(base);
                        ms->page_cgroup = NULL;
                }
        }
}

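/*
 * Allocate per-section tables for a pfn range going online; on failure,
 * roll back everything allocated for the range so far.
 */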
int __meminit online_page_cgroup(unsigned long start_pfn,
                                 unsigned long nr_pages,
                                 int nid)
{
        unsigned long start, end, pfn;
        int fail = 0;

        start = start_pfn & ~(PAGES_PER_SECTION - 1);
        end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

        for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
                if (!pfn_present(pfn))
                        continue;
                fail = init_section_page_cgroup(pfn);
        }
        if (!fail)
                return 0;

        /* rollback */
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_cgroup(pfn);

        return -ENOMEM;
}

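/* Tear down the per-section tables covering an offlined pfn range. */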
int __meminit offline_page_cgroup(unsigned long start_pfn,
                                  unsigned long nr_pages, int nid)
{
        unsigned long start, end, pfn;

        start = start_pfn & ~(PAGES_PER_SECTION - 1);
        end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_cgroup(pfn);
        return 0;
}

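/*
 * Memory-hotplug notifier: allocate tables before memory comes online and
 * free them after it goes offline, keeping page_cgroup in sync.
 */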
static int __meminit page_cgroup_callback(struct notifier_block *self,
                                          unsigned long action, void *arg)
{
        struct memory_notify *mn = arg;
        int ret = 0;
        switch (action) {
        case MEM_GOING_ONLINE:
                ret = online_page_cgroup(mn->start_pfn,
                                mn->nr_pages, mn->status_change_nid);
                break;
        case MEM_OFFLINE:
                offline_page_cgroup(mn->start_pfn,
                                mn->nr_pages, mn->status_change_nid);
                break;
        case MEM_CANCEL_ONLINE:
        case MEM_GOING_OFFLINE:
                break;
        case MEM_ONLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }

        if (ret)
                ret = notifier_from_errno(ret);
        else
                ret = NOTIFY_OK;

        return ret;
}

#endif

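/*
 * Set up page_cgroup for every present section and register the
 * memory-hotplug callback.
 */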
void __init page_cgroup_init(void)
{
        unsigned long pfn;
        int fail = 0;

        if (mem_cgroup_disabled())
                return;

        for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
                if (!pfn_present(pfn))
                        continue;
                fail = init_section_page_cgroup(pfn);
        }
        if (fail) {
                printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
                panic("Out of memory");
        } else {
                hotplug_memory_notifier(page_cgroup_callback, 0);
        }
        printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
        printk(KERN_INFO "please try 'cgroup_disable=memory' option if you don't"
                         " want memory cgroups\n");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
        return;
}

#endif

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
        struct page **map;
        unsigned long length;
        spinlock_t lock;
};

struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
        unsigned short id;
};
#define SC_PER_PAGE     (PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK     (SC_PER_PAGE - 1)
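
/*
 * A swp_entry's offset selects one struct swap_cgroup in ctrl->map:
 * ctrl->map[offset / SC_PER_PAGE] is the backing page, and
 * (offset & SC_POS_MASK) is the slot within that page. All three
 * accessors below decode entries this way.
 */
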
/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
 * against SwapCache. At swap_free(), this is accessed directly from swap.
 *
 * This means,
 *  - we have no race in "exchange" when we're accessed via SwapCache because
 *    SwapCache(and its swp_entry) is under lock.
 *  - When called via swap_free(), there is no user of this entry and no race.
 * However, "record" and "cmpxchg" may now run concurrently (e.g. while a
 * charge is being moved), so updates of an entry are serialized by
 * ctrl->lock.
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
        struct page *page;
        struct swap_cgroup_ctrl *ctrl;
        unsigned long idx, max;

        ctrl = &swap_cgroup_ctrl[type];

        for (idx = 0; idx < ctrl->length; idx++) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!page)
                        goto not_enough_page;
                ctrl->map[idx] = page;
        }
        return 0;
not_enough_page:
        max = idx;
        for (idx = 0; idx < max; idx++)
                __free_page(ctrl->map[idx]);

        return -ENOMEM;
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns the old id on success, 0 on failure.
 * (There is no mem_cgroup using 0 as its id.)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
                                   unsigned short old, unsigned short new)
{
        int type = swp_type(ent);
        unsigned long offset = swp_offset(ent);
        unsigned long idx = offset / SC_PER_PAGE;
        unsigned long pos = offset & SC_POS_MASK;
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
        struct swap_cgroup *sc;
        unsigned long flags;
        unsigned short retval;

        ctrl = &swap_cgroup_ctrl[type];

        mappage = ctrl->map[idx];
        sc = page_address(mappage);
        sc += pos;
        spin_lock_irqsave(&ctrl->lock, flags);
        retval = sc->id;
        if (retval == old)
                sc->id = new;
        else
                retval = 0;
        spin_unlock_irqrestore(&ctrl->lock, flags);
        return retval;
}

/**
 * swap_cgroup_record - record mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup's CSS ID to be recorded
 *
 * Returns the old value on success, 0 on failure.
 * (Of course, the old value can be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
        int type = swp_type(ent);
        unsigned long offset = swp_offset(ent);
        unsigned long idx = offset / SC_PER_PAGE;
        unsigned long pos = offset & SC_POS_MASK;
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
        struct swap_cgroup *sc;
        unsigned short old;
        unsigned long flags;

        ctrl = &swap_cgroup_ctrl[type];

        mappage = ctrl->map[idx];
        sc = page_address(mappage);
        sc += pos;
        spin_lock_irqsave(&ctrl->lock, flags);
        old = sc->id;
        sc->id = id;
        spin_unlock_irqrestore(&ctrl->lock, flags);

        return old;
}

/**
 * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the CSS ID of the mem_cgroup on success, 0 on failure
 * (0 is an invalid ID).
 */
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
        int type = swp_type(ent);
        unsigned long offset = swp_offset(ent);
        unsigned long idx = offset / SC_PER_PAGE;
        unsigned long pos = offset & SC_POS_MASK;
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
        struct swap_cgroup *sc;
        unsigned short ret;

        ctrl = &swap_cgroup_ctrl[type];
        mappage = ctrl->map[idx];
        sc = page_address(mappage);
        sc += pos;
        ret = sc->id;
        return ret;
}

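/*
 * Called from swapon: size and allocate the per-swapfile map of
 * swap_cgroup pages (one unsigned short record per swap slot).
 */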
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
        void *array;
        unsigned long array_size;
        unsigned long length;
        struct swap_cgroup_ctrl *ctrl;

        if (!do_swap_account)
                return 0;

        length = ((max_pages/SC_PER_PAGE) + 1);
        array_size = length * sizeof(void *);

        array = vmalloc(array_size);
        if (!array)
                goto nomem;

        memset(array, 0, array_size);
        ctrl = &swap_cgroup_ctrl[type];
        mutex_lock(&swap_cgroup_mutex);
        ctrl->length = length;
        ctrl->map = array;
        spin_lock_init(&ctrl->lock);
        if (swap_cgroup_prepare(type)) {
                /* memory shortage */
                ctrl->map = NULL;
                ctrl->length = 0;
                vfree(array);
                mutex_unlock(&swap_cgroup_mutex);
                goto nomem;
        }
        mutex_unlock(&swap_cgroup_mutex);

        return 0;
nomem:
        printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
        printk(KERN_INFO
                "swap_cgroup can be disabled by noswapaccount boot option\n");
        return -ENOMEM;
}

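/* Called from swapoff: free the backing pages and the map for this type. */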
void swap_cgroup_swapoff(int type)
{
        int i;
        struct swap_cgroup_ctrl *ctrl;

        if (!do_swap_account)
                return;

        mutex_lock(&swap_cgroup_mutex);
        ctrl = &swap_cgroup_ctrl[type];
        if (ctrl->map) {
                for (i = 0; i < ctrl->length; i++) {
                        struct page *page = ctrl->map[i];
                        if (page)
                                __free_page(page);
                }
                vfree(ctrl->map);
                ctrl->map = NULL;
                ctrl->length = 0;
        }
        mutex_unlock(&swap_cgroup_mutex);
}

#endif