/*
 *  arch/s390/mm/pgtable.c
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

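/*
 * A page table has 256 entries: 1K with 4 byte entries on 31 bit,
 * 2K with 8 byte entries on 64 bit. That puts four page table
 * fragments into a 4K page on 31 bit and two on 64 bit. One bit in
 * page->flags per fragment tracks which pieces are in use; FRAG_MASK
 * covers all of them, and SECOND_HALVES marks the upper fragment of
 * each pair, which doubles as the shadow table while noexec is
 * active. ALLOC_ORDER is the allocation order for the 2048-entry
 * region and segment (crst) tables.
 */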
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
#define FRAG_MASK	15UL
#define SECOND_HALVES	10UL
#else
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
#define FRAG_MASK	3UL
#define SECOND_HALVES	2UL
#endif

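/*
 * Allocate a 2048-entry region or segment table for @mm. With @noexec
 * set a shadow table is allocated as well and its physical address is
 * cached in page->index of the primary table's page. The page is
 * linked into mm->context.crst_list so disable_noexec() can find it
 * later. Returns the physical address of the table, which the kernel
 * can use directly as a pointer since kernel memory is mapped 1:1.
 */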
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
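	/* page->index caches the shadow table address, 0 if there is none. */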
	page->index = 0;
	if (noexec) {
		struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
		if (!shadow) {
			__free_pages(page, ALLOC_ORDER);
			return NULL;
		}
		page->index = page_to_phys(shadow);
	}
	spin_lock(&mm->page_table_lock);
	list_add(&page->lru, &mm->context.crst_list);
	spin_unlock(&mm->page_table_lock);
	return (unsigned long *) page_to_phys(page);
}

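/*
 * Release a region or segment table obtained from crst_table_alloc():
 * unlink its page from mm->context.crst_list, then free the shadow
 * table, if one was attached, before the table itself.
 */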
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	unsigned long *shadow = get_shadow_table(table);
	struct page *page = virt_to_page(table);

	spin_lock(&mm->page_table_lock);
	list_del(&page->lru);
	spin_unlock(&mm->page_table_lock);
	if (shadow)
		free_pages((unsigned long) shadow, ALLOC_ORDER);
	free_pages((unsigned long) table, ALLOC_ORDER);
}

/*
 * page table entry allocation/free routines.
 */
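/*
 * Allocate one 256-entry page table fragment. A partially used page
 * at the head of mm->context.pgtable_list is preferred; a fresh page
 * is allocated only when the head page is full as well (full pages
 * are kept at the tail, so a full head means all pages are full).
 * With noexec two adjacent fragments are claimed at once, the upper
 * one serving as the shadow table.
 */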
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	unsigned long bits;

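	/* With noexec a table and its shadow occupy two adjacent fragments. */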
	bits = mm->context.noexec ? 3UL : 1UL;
	spin_lock(&mm->page_table_lock);
	page = NULL;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
			page = NULL;
	}
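	/* No page with a free fragment; drop the lock to allocate a new one. */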
	if (!page) {
		spin_unlock(&mm->page_table_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		page->flags &= ~FRAG_MASK;
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock(&mm->page_table_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	}
	table = (unsigned long *) page_to_phys(page);
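	/* Find the first free fragment slot, 256 entries per fragment. */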
	while (page->flags & bits) {
		table += 256;
		bits <<= 1;
	}
	page->flags |= bits;
	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		list_move_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock(&mm->page_table_lock);
	return table;
}

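/*
 * Return a page table fragment to its page. The fragment bits are
 * derived from the table's offset within the page. When the last
 * fragment of a page is freed the page is taken off pgtable_list
 * and released; otherwise the page moves to the head of the list,
 * as it now has room for another allocation.
 */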
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

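	/* Shift the in-use bits to this table's fragment slot in the page. */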
	bits = mm->context.noexec ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock(&mm->page_table_lock);
	page->flags ^= bits;
	if (page->flags & FRAG_MASK) {
		/* Page now has some free pgtable fragments. */
		list_move(&page->lru, &mm->context.pgtable_list);
		page = NULL;
	} else
		/* All fragments of the 4K page have been freed. */
		list_del(&page->lru);
	spin_unlock(&mm->page_table_lock);
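	/* An empty page was delisted above; free it outside the lock. */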
	if (page) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}

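/*
 * Switch off the no-execute emulation for @mm: free every shadow
 * region and segment table, give back the page table fragments that
 * served as shadow tables, clear the noexec flag and propagate the
 * new context with update_mm().
 */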
void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
{
	struct page *page;

	spin_lock(&mm->page_table_lock);
	/* Free shadow region and segment tables. */
	list_for_each_entry(page, &mm->context.crst_list, lru)
		if (page->index) {
			free_pages((unsigned long) page->index, ALLOC_ORDER);
			page->index = 0;
		}
	/* "Free" second halves of page tables. */
	list_for_each_entry(page, &mm->context.pgtable_list, lru)
		page->flags &= ~SECOND_HALVES;
	spin_unlock(&mm->page_table_lock);
	mm->context.noexec = 0;
	update_mm(mm, tsk);
}