/*
 *  arch/s390/mm/pgtable.c
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
#define FRAG_MASK	15UL
#define SECOND_HALVES	10UL
#else
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
#define FRAG_MASK	3UL
#define SECOND_HALVES	2UL
#endif
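
/*
 * A note on the constants above, as used by the code below: a
 * region/segment (crst) table occupies 2^ALLOC_ORDER pages. Page
 * tables are smaller than a page, so each 4K page is carved into
 * TABLES_PER_PAGE fragments of 256 entries, and one bit per fragment
 * in page->flags, covered by FRAG_MASK, tracks which fragments are in
 * use. When the mm emulates no-execute (mm->context.noexec), every
 * table gets a shadow copy: crst_table_alloc() keeps the shadow's
 * physical address in page->index, and SECOND_HALVES selects the
 * fragment bits that hold shadow page tables.
 */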
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	page->index = 0;
	if (noexec) {
		struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
		if (!shadow) {
			__free_pages(page, ALLOC_ORDER);
			return NULL;
		}
		page->index = page_to_phys(shadow);
	}
	spin_lock(&mm->page_table_lock);
	list_add(&page->lru, &mm->context.crst_list);
	spin_unlock(&mm->page_table_lock);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	unsigned long *shadow = get_shadow_table(table);
	struct page *page = virt_to_page(table);

	spin_lock(&mm->page_table_lock);
	list_del(&page->lru);
	spin_unlock(&mm->page_table_lock);
	if (shadow)
		free_pages((unsigned long) shadow, ALLOC_ORDER);
	free_pages((unsigned long) table, ALLOC_ORDER);
}

/*
 * page table entry allocation/free routines.
 */
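
/*
 * page_table_alloc() hands out one 256-entry fragment, or two
 * consecutive fragments (table plus shadow) when noexec is active,
 * which is why "bits" starts as 3UL instead of 1UL in that case.
 * Pages that still have free fragments are kept at the head of
 * pgtable_list; fully used pages are moved to the tail, so a full
 * first entry means a fresh page must be allocated.
 */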
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	unsigned long bits;

	bits = mm->context.noexec ? 3UL : 1UL;
	spin_lock(&mm->page_table_lock);
	page = NULL;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
			page = NULL;
	}
	if (!page) {
		spin_unlock(&mm->page_table_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		page->flags &= ~FRAG_MASK;
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock(&mm->page_table_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	}
	table = (unsigned long *) page_to_phys(page);
	while (page->flags & bits) {
		table += 256;
		bits <<= 1;
	}
	page->flags |= bits;
	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		list_move_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock(&mm->page_table_lock);
	return table;
}
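
/*
 * page_table_free() recomputes the fragment bits from the table's
 * offset within its page (each fragment being 256 entries), clears
 * them with an XOR, and frees the whole page once no fragment is
 * left in use.
 */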
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = mm->context.noexec ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock(&mm->page_table_lock);
	page->flags ^= bits;
	if (page->flags & FRAG_MASK) {
		/* Page now has some free pgtable fragments. */
		list_move(&page->lru, &mm->context.pgtable_list);
		page = NULL;
	} else
		/* All fragments of the 4K page have been freed. */
		list_del(&page->lru);
	spin_unlock(&mm->page_table_lock);
	if (page) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}
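
/*
 * Tear down the no-exec emulation for this mm: release all shadow
 * region/segment tables, mark the shadow page-table fragments free
 * again by clearing their SECOND_HALVES bits, and reload the address
 * space via update_mm().
 */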
void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
{
	struct page *page;

	spin_lock(&mm->page_table_lock);
	/* Free shadow region and segment tables. */
	list_for_each_entry(page, &mm->context.crst_list, lru)
		if (page->index) {
			free_pages((unsigned long) page->index, ALLOC_ORDER);
			page->index = 0;
		}
	/* "Free" second halves of page tables. */
	list_for_each_entry(page, &mm->context.pgtable_list, lru)
		page->flags &= ~SECOND_HALVES;
	spin_unlock(&mm->page_table_lock);
	mm->context.noexec = 0;
	update_mm(mm, tsk);
}