/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto the unevictable list.
 */
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		goto out;

	ptep = pte_offset_map(pmd, addr);

	if (!is_swap_pte(*ptep)) {
		pte_unmap(ptep);
		goto out;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement step of page migration has started,
	 * page_count *must* be zero, and we don't want to call
	 * wait_on_page_locked() against a page without holding a reference.
	 * So use get_page_unless_zero() here; even if it fails, the page
	 * fault will simply occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 * (That is: one reference held by the migration caller, one held by the
 *  radix tree when the page has a mapping, and one more when fs-private
 *  data such as buffer heads is attached.)
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
			(struct page *)radix_tree_deref_slot(pslot) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);
	/*
	 * Drop cache reference from old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);

	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);
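
/*
 * Typical use (a sketch, not required by anything in this file): a
 * filesystem whose pages carry no fs-private state can simply point its
 * address_space_operations at migrate_page, e.g.
 *
 *	static const struct address_space_operations example_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 *
 * "example_aops" is a hypothetical name used only for illustration.
 */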

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	/* Grab a reference on each buffer and lock it for the transfer */
	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	/* Point every buffer at the new page */
	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	/* Unlock the buffers and drop the references taken above */
	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Write back a page to clean its dirty state.
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.nonblocking = 1,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */
static int move_to_new_page(struct page *newpage, struct page *page)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems
		 * should provide a migration function. Anonymous
		 * pages are part of swap space which also has its
		 * own migration function. This is the most common
		 * path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page);
	else
		rc = fallback_migrate_page(mapping, newpage, page);

	if (!rc)
		remove_migration_ptes(page, newpage);
	else
		newpage->mapping = NULL;

	unlock_page(newpage);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, int offlining)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int rcu_locked = 0;
	int charge = 0;
	struct mem_cgroup *mem = NULL;
	struct anon_vma *anon_vma = NULL;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto move_newpage;
	}

	/* prepare cgroup just returns 0 or -ENOMEM */
	rc = -EAGAIN;

	if (!trylock_page(page)) {
		if (!force)
			goto move_newpage;
		lock_page(page);
	}

	/*
	 * Only memory hotplug's offline_pages() caller has locked out KSM,
	 * and can safely migrate a KSM page.  The other cases have skipped
	 * PageKsm along with PageReserved - but it is only now when we have
	 * the page lock that we can be certain it will not go KSM beneath us
	 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
	 * its pagecount raised, but only here do we take the page lock which
	 * serializes that).
	 */
	if (PageKsm(page) && !offlining) {
		rc = -EBUSY;
		goto unlock;
	}

	/* charge against new page */
	charge = mem_cgroup_prepare_migration(page, &mem);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto unlock;
	}
	BUG_ON(charge);

	if (PageWriteback(page)) {
		if (!force)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that the anon_vma is freed while we migrate a page.
	 * This rcu_read_lock() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of the
	 * page lock: they may use writepage() or lock_page() during migration,
	 * so only anonymous pages need this care.
	 */
	if (PageAnon(page)) {
		rcu_read_lock();
		rcu_locked = 1;
		anon_vma = page_anon_vma(page);
		atomic_inc(&anon_vma->external_refcount);
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG.  So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining.  Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated.  So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		if (!PageAnon(page) && page_has_private(page)) {
			/*
			 * Go direct to try_to_free_buffers() here because
			 * a) that's what try_to_release_page() would do anyway
			 * b) we may be under rcu_read_lock() here, so we can't
			 *    use GFP_KERNEL which is what try_to_release_page()
			 *    needs to be effective.
			 */
			try_to_free_buffers(page);
			goto rcu_unlock;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page);

	if (rc)
		remove_migration_ptes(page, page);
rcu_unlock:

	/* Drop an anon_vma reference if we took one */
	if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
		int empty = list_empty(&anon_vma->head);
		spin_unlock(&anon_vma->lock);
		if (empty)
			anon_vma_free(anon_vma);
	}

	if (rcu_locked)
		rcu_read_unlock();
uncharge:
	if (!charge)
		mem_cgroup_end_migration(mem, page, newpage);
unlock:
	unlock_page(page);

	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}

move_newpage:

	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that determines from the page to be migrated and the private data
 * the target of the move and allocates the page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore. All pages will be
 * returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, int offlining)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2, offlining);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	putback_lru_pages(from);

	if (rc)
		return rc;

	return nr_failed + retry;
}
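
/*
 * Usage sketch: callers normally call migrate_prep() first, isolate the
 * pages they want to move onto a private list with isolate_lru_page(),
 * and then pass that list together with an allocation callback to
 * migrate_pages(). See do_move_page_to_node_array() below for the
 * in-tree example of this pattern.
 */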

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_exact_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		/* Use PageReserved to check for zero page */
		if (PageReserved(page) || PageKsm(page))
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm, 0);

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	nodemask_t task_nodes;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	task_nodes = cpuset_mems_allowed(task);

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of page_to_node array in a page,
	 * but keep the last one as a marker
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_HIGH_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker for this chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}

/*
 * Determine the nodes of an array of pages and store them in an array
 * of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page) || PageKsm(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store them in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid != tcred->suid && cred->uid != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	if (nodes) {
		err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
				    flags);
	} else {
		err = do_pages_stat(mm, nr_pages, pages, status);
	}

out:
	mmput(mm);
	return err;
}
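
/*
 * Userspace reaches the syscall above as move_pages(2), typically through
 * libnuma's numa_move_pages(). A minimal sketch (illustrative only, not
 * part of this file; the address and target node are assumptions):
 *
 *	void *pages[1] = { addr };
 *	int nodes[1]   = { 1 };
 *	int status[1];
 *	long rc = syscall(__NR_move_pages, 0, 1, pages, nodes, status,
 *			  MPOL_MF_MOVE);
 */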

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
#endif