/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */


/**
 * clflush_cache_range - flush a cache range with clflush
 * @addr: virtual start address
 * @size: number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *addr, int size)
{
	int i;

	mb();
	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr+i);
	mb();
}
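
/*
 * Minimal usage sketch (illustrative only, not a caller in this file):
 * after filling a buffer with cacheable stores, a caller that needs the
 * data visible to a non-coherent observer can flush just that range
 * instead of flushing all caches. 'buf' and 'len' are hypothetical.
 *
 *	void *buf;	// hypothetical buffer in the kernel mapping
 *	int len;	// hypothetical length in bytes
 *
 *	clflush_cache_range(buf, len);
 */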

static void __cpa_flush_all(void *arg)
{
	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

static void cpa_flush_all(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, NULL, 1, 1);
}

struct clflush_data {
	unsigned long addr;
	int numpages;
};

static void __cpa_flush_range(void *arg)
{
	struct clflush_data *cld = arg;

	/*
	 * We could optimize this further and do individual per-page
	 * TLB invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64-bit as well.
	 */
	__flush_tlb_all();

	clflush_cache_range((void *) cld->addr, cld->numpages * PAGE_SIZE);
}

static void cpa_flush_range(unsigned long addr, int numpages)
{
	struct clflush_data cld;

	BUG_ON(irqs_disabled());

	cld.addr = addr;
	cld.numpages = numpages;

	on_each_cpu(__cpa_flush_range, &cld, 1, 1);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon), so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1MB needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext, since that is gone later on.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
	/* The .rodata section needs to be read-only */
	if (within(address, (unsigned long)__start_rodata,
		   (unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}

pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}
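
/*
 * Minimal usage sketch (hypothetical caller, 'addr' assumed to be a
 * mapped kernel virtual address): lookup_address() returns the pte and
 * reports via 'level' whether the address is covered by a 2M large page
 * or a 4k page.
 *
 *	int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && level == PG_LEVEL_2M)
 *		;	// addr is backed by a large page
 */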

static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up page table. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. See section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}

static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

#ifdef CONFIG_X86_32
	BUG_ON(pfn > max_low_pfn);
#endif

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	prot = static_protections(prot, address);

	if (level == PG_LEVEL_4K) {
		WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
	} else {
		/* Clear the PSE bit for the 4k level pages! */
		pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;

		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @prot: New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have
 * a mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */

static int change_page_attr_addr(unsigned long address, pgprot_t prot)
{
	int err = 0, kernel_map = 0;
	unsigned long pfn = __pa(address) >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (address >= __START_KERNEL_map &&
			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	if (!kernel_map || pte_present(pfn_pte(0, prot))) {
		err = __change_page_attr(address, pfn, prot);
		if (err)
			return err;
	}

#ifdef CONFIG_X86_64
	/*
	 * Handle the kernel mapping too, which aliases part of
	 * lowmem:
	 */
	if (__pa(address) < KERNEL_TEXT_SIZE) {
		unsigned long addr2;
		pgprot_t prot2;

		addr2 = __START_KERNEL_map + __pa(address);
		/* Make sure the kernel mappings stay executable */
		prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
		err = __change_page_attr(addr2, pfn, prot2);
	}
#endif

	return err;
}

static int __change_page_attr_set_clr(unsigned long addr, int numpages,
				      pgprot_t mask_set, pgprot_t mask_clr)
{
	pgprot_t new_prot;
	int level;
	pte_t *pte;
	int i, ret;

	for (i = 0; i < numpages; i++) {

		pte = lookup_address(addr, &level);
		if (!pte)
			return -EINVAL;

		new_prot = pte_pgprot(*pte);

		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
		pgprot_val(new_prot) |= pgprot_val(mask_set);

		ret = change_page_attr_addr(addr, new_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
	}

	return 0;
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr)
{
	int ret = __change_page_attr_set_clr(addr, numpages, mask_set,
					     mask_clr);

	/*
	 * On success we use clflush, when the CPU supports it, to
	 * avoid the wbinvd. If the CPU does not support it, and in the
	 * error case, we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages);
	else
		cpa_flush_all();

	return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}

int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);
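
/*
 * Usage sketch for drivers (hypothetical names, error handling omitted):
 * make a direct-mapped buffer uncached while a device accesses it
 * non-coherently, then restore write-back caching when done.
 *
 *	unsigned long addr = (unsigned long)buf;	// hypothetical buffer
 *	int npages = size >> PAGE_SHIFT;		// hypothetical size
 *
 *	set_memory_uc(addr, npages);
 *	... device access ...
 *	set_memory_wb(addr, npages);
 */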

int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
static inline int __change_page_attr_set(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int __change_page_attr_clear(unsigned long addr, int numpages,
					   pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_set(addr, numpages,
				      __pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_clear(addr, numpages,
					__pgprot(_PAGE_PRESENT));
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet, do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all TLBs,
	 * but that can deadlock -> flush only the current CPU:
	 */
	__flush_tlb_all();
}
#endif

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif