/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */
void clflush_cache_range(void *addr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr + i);
}
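
/*
 * Example usage (illustrative sketch; "buf" and "len" are
 * hypothetical): push CPU-modified cache lines of a buffer out to
 * memory before a non-snooping device reads it:
 *
 *	memset(buf, 0, len);
 *	clflush_cache_range(buf, len);
 *
 * The loop above walks the range in cache-line (x86_clflush_size)
 * sized steps.
 */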

static void flush_kernel_map(void *arg)
{
	/*
	 * Flush everything to work around an erratum in early Athlons
	 * regarding large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

static void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}

struct clflush_data {
	unsigned long addr;
	int numpages;
};

static void __cpa_flush_range(void *arg)
{
	struct clflush_data *cld = arg;

	/*
	 * We could optimize this further and do individual per-page TLB
	 * invalidates for a low number of pages. Caveat: we must flush
	 * the high aliases on 64-bit as well.
	 */
	__flush_tlb_all();

	clflush_cache_range((void *)cld->addr, cld->numpages * PAGE_SIZE);
}

static void cpa_flush_range(unsigned long addr, int numpages)
{
	struct clflush_data cld;

	BUG_ON(irqs_disabled());

	cld.addr = addr;
	cld.numpages = numpages;

	on_each_cpu(__cpa_flush_range, &cld, 1, 1);
}
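
/*
 * Note on the pattern above: cld can live on the caller's stack
 * because on_each_cpu() is called with wait == 1, i.e. it does not
 * return before __cpa_flush_range() has finished on all CPUs.
 */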

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1MB needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext, since that is gone after boot.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
	/* The .rodata section needs to be read-only: */
	if (within(address, (unsigned long)__start_rodata,
		   (unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}
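
/*
 * Illustrative sketch (hypothetical caller): with CONFIG_DEBUG_RODATA,
 * a request to map a .rodata page with PAGE_KERNEL (which includes
 * _PAGE_RW) is silently corrected:
 *
 *	prot = static_protections(PAGE_KERNEL, rodata_addr);
 *
 * returns PAGE_KERNEL with _PAGE_RW stripped, so .rodata stays
 * read-only no matter what the caller asked for.
 */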

pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}
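
/*
 * Example usage (illustrative sketch; "addr" is a hypothetical
 * kernel-virtual address):
 *
 *	int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && level == PG_LEVEL_2M)
 *		printk("addr is backed by a large page\n");
 *
 * Note that for PG_LEVEL_2M the returned pointer is really the pmd
 * entry cast to a pte pointer.
 */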

static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable (see section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual).
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}
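
/*
 * Worked example for the split above, assuming 64-bit (2M large
 * pages, PTRS_PER_PTE == 512): for a physical address of 0x211000,
 * addr = 0x211000 & LARGE_PAGE_MASK = 0x200000, and the loop installs
 * 512 4k PTEs covering 0x200000-0x3fffff, all with the large page's
 * original protections, so the effective mapping is unchanged until
 * the caller modifies the one PTE it is interested in.
 */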

static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

#ifdef CONFIG_X86_32
	BUG_ON(pfn > max_low_pfn);
#endif

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	prot = static_protections(prot, address);

	if (level == PG_LEVEL_4K) {
		WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
	} else {
		/* Clear the PSE bit for the 4k level pages! */
		pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;

		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @prot: New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have
 * a mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */
static int change_page_attr_addr(unsigned long address, pgprot_t prot)
{
	int err = 0, kernel_map = 0;
	unsigned long pfn = __pa(address) >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (address >= __START_KERNEL_map &&
	    address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	if (!kernel_map || pte_present(pfn_pte(0, prot))) {
		err = __change_page_attr(address, pfn, prot);
		if (err)
			return err;
	}

#ifdef CONFIG_X86_64
	/*
	 * Handle the kernel mapping too, which aliases part of
	 * lowmem:
	 */
	if (__pa(address) < KERNEL_TEXT_SIZE) {
		unsigned long addr2;
		pgprot_t prot2;

		addr2 = __START_KERNEL_map + __pa(address);
		/* Make sure the kernel mappings stay executable: */
		prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
		err = __change_page_attr(addr2, pfn, prot2);
	}
#endif

	return err;
}

static int __change_page_attr_set_clr(unsigned long addr, int numpages,
				      pgprot_t mask_set, pgprot_t mask_clr)
{
	pgprot_t new_prot;
	int level;
	pte_t *pte;
	int i, ret;

	for (i = 0; i < numpages; i++) {

		pte = lookup_address(addr, &level);
		if (!pte)
			return -EINVAL;

		new_prot = pte_pgprot(*pte);

		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
		pgprot_val(new_prot) |= pgprot_val(mask_set);

		ret = change_page_attr_addr(addr, new_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
	}

	return 0;
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr)
{
	int ret = __change_page_attr_set_clr(addr, numpages, mask_set,
					     mask_clr);

	/*
	 * On success, use clflush if the CPU supports it, to avoid the
	 * wbinvd. If the CPU does not support clflush, and in the error
	 * case, fall back to global_flush_tlb() (which uses wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages);
	else
		global_flush_tlb();

	return ret;
}
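
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * both masks can be combined in a single call (and thus a single
 * flush), e.g. to make a range read-only and non-executable at once:
 *
 *	change_page_attr_set_clr(addr, numpages,
 *				 __pgprot(_PAGE_NX), __pgprot(_PAGE_RW));
 *
 * The set_memory_* helpers below are thin wrappers around this.
 */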

static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}

int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);
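
/*
 * Example usage (illustrative sketch; "buf" and "nrpages" are
 * hypothetical): a driver can temporarily switch a lowmem buffer's
 * direct mapping to uncached for a non-snooping device:
 *
 *	if (set_memory_uc((unsigned long)buf, nrpages))
 *		goto error;
 *	... device accesses buf ...
 *	set_memory_wb((unsigned long)buf, nrpages);
 *
 * "buf" must be part of the direct mapping (e.g. from
 * __get_free_pages()); the calls include the required TLB and cache
 * flushes.
 */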

int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}
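
/*
 * Example usage (illustrative sketch; "page" is a hypothetical
 * struct page from lowmem): write-protect its mapping and undo that
 * later:
 *
 *	set_pages_ro(page, 1);
 *	...
 *	set_pages_rw(page, 1);
 *
 * These wrappers simply resolve the struct page to its kernel-virtual
 * address and call the corresponding set_memory_* function.
 */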

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
static inline int __change_page_attr_set(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int __change_page_attr_clear(unsigned long addr, int numpages,
					   pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_set(addr, numpages,
				      __pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_clear(addr, numpages,
					__pgprot(_PAGE_PRESENT));
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet, do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored: the calls cannot fail, as
	 * large pages are disabled at boot time:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all TLBs, but that can
	 * deadlock, so flush only the current CPU's TLB:
	 */
	__flush_tlb_all();
}
#endif

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif