/*
 * Copyright IBM Corp. 2007,2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

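/*
 * Allocate a crst (region or segment) table. The table spans
 * 1 << ALLOC_ORDER pages; the kernel's 1:1 mapping makes the physical
 * address returned by page_to_phys() directly usable as a pointer.
 */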
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

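/*
 * Release a crst table previously obtained from crst_table_alloc().
 */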
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
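/*
 * Grow the address space of @mm by adding page table levels on top of
 * the current top-level table until the asce limit covers @limit.
 * The upgrade is done under page_table_lock; if another cpu raced and
 * upgraded first, the spare table is freed and the loop retries.
 */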
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}

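/*
 * Shrink the address space of @mm by removing page table levels from
 * the top until the asce limit is at or below @limit. The previous
 * top-level tables are freed and the new asce is loaded via update_mm().
 */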
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

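/*
 * Detach a single guest segment table entry from the parent page table:
 * drop the matching rmap of the backing page table and reset the entry
 * to its invalid, not yet faulted-in state. Returns 1 if a valid entry
 * was cleared, in which case the caller has to flush the tlb.
 */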
static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INV)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
	return 1;
}

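/*
 * Flush all tlb entries of the guest address space, either with a
 * targeted idte for the gmap asce or, if idte is not available, with
 * a global flush.
 */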
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	gmap_flush_tlb(gmap);

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

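/*
 * Allocate a lower level crst table, initialize it with @init and link
 * it into the still invalid higher level entry @table; if the entry
 * turned valid in the meantime, the freshly allocated table is freed
 * again.
 */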
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
{
	struct page *page;
	unsigned long *new;

	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	down_read(&gmap->mm->mmap_sem);
	if (*table & _REGION_ENTRY_INV) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV;
	}
out:
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > PGDIR_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
	}
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
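
/*
 * Typical use of the gmap interface by a client such as KVM might look
 * like the following sketch. This is illustrative only and not part of
 * this file; all names except the gmap functions are made up:
 *
 *	struct gmap *gmap;
 *
 *	gmap = gmap_alloc(current->mm);
 *	if (!gmap)
 *		return -ENOMEM;
 *	if (gmap_map_segment(gmap, uaddr, gaddr, size)) {
 *		gmap_free(gmap);
 *		return -ENOMEM;
 *	}
 *	gmap_enable(gmap);
 *	... enter SIE, resolve guest faults with gmap_fault() ...
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */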
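/**
 * gmap_fault - resolve a fault on a guest address
 * @address: address in the guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the corresponding user space address of the parent mm,
 * -EFAULT if the guest address is not mapped, or -ENOMEM if a needed
 * page table or rmap structure could not be allocated.
 */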
unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *table, vmaddr, segment;
	struct mm_struct *mm;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct vm_area_struct *vma;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	current->thread.gmap_addr = address;
	mm = gmap->mm;
	/* Walk the gmap address space page table */
	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);

	/* Convert the gmap address to an mm address. */
	segment = *table;
	if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_RO) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		vma = find_vma(mm, vmaddr);
		if (!vma || vma->vm_start > vmaddr)
			return -EFAULT;

		/* Walk the parent mm page table */
		pgd = pgd_offset(mm, vmaddr);
		pud = pud_alloc(mm, pgd, vmaddr);
		if (!pud)
			return -ENOMEM;
		pmd = pmd_alloc(mm, pud, vmaddr);
		if (!pmd)
			return -ENOMEM;
		if (!pmd_present(*pmd) &&
		    __pte_alloc(mm, vma, pmd, vmaddr))
			return -ENOMEM;
		/* pmd now points to a valid segment table entry. */
		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
		if (!rmap)
			return -ENOMEM;
		/* Link gmap segment table entry location to page table. */
		page = pmd_page(*pmd);
		mp = (struct gmap_pgtable *) page->index;
		rmap->entry = table;
		list_add(&rmap->list, &mp->mapper);
		/* Set gmap segment table entry to page table. */
		*table = pmd_val(*pmd) & PAGE_MASK;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(gmap_fault);

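/*
 * Called when a page table of the parent mm goes away: revert all gmap
 * segment table entries that point to this page table back to their
 * invalid, not yet faulted-in state and flush the tlb if any entry was
 * changed.
 */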
void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry =
			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

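/*
 * With pgstes each page table occupies a full 4K page: the lower 2K
 * hold the 256 pte entries, the upper 2K the corresponding page status
 * table entries (pgste). page->index points to a gmap_pgtable that
 * records the parent address and the rmap list of gmap segment entries
 * referencing this table; page->_mapcount is set to 3 to mark both 2K
 * halves as used.
 */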
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 3);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}

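/*
 * Free a pgste page table; by the time this is called no gmap segment
 * table entry may reference the table anymore (the mapper list must be
 * empty).
 */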
static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
				       unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

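/*
 * Atomically toggle the given bits in *v and return the new value.
 * Used on page->_mapcount, which tracks the allocation state of the
 * 1K/2K page table fragments of a 4K page.
 */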
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
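/*
 * A 4K page is carved into 1K (31 bit) or 2K (64 bit) page table
 * fragments. The low bits of page->_mapcount record which fragments
 * are allocated, the next higher nibble marks fragments whose free is
 * still pending in an rcu grace period; pages with free fragments are
 * kept on mm->context.pgtable_list.
 */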
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

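/*
 * Really free a page table after the rcu grace period has expired.
 * @bit is the shifted fragment bit taken from the encoded table
 * pointer; the special value FRAG_MASK denotes a pgste page table.
 */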
static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

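/*
 * Defer the release of a page table until after the rcu grace period.
 * The fragment bit is moved from the "allocated" to the "pending free"
 * nibble of page->_mapcount and, shifted by four, encoded into the low
 * bits of the table pointer handed to tlb_remove_table().
 */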
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

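/*
 * Callback of tlb_remove_table(): the low bits of the encoded pointer
 * select the free routine - zero for a full crst table, anything else
 * for a pgste or fragment page table.
 */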
void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long) _table & PAGE_MASK);
	unsigned type = (unsigned long) _table & ~PAGE_MASK;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

#endif

/*
 * switch on pgstes for the current userspace process (needed for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have the switched amode? If not, we cannot do sie */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	/* Let's check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again whether something happened in the meantime */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */