#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
#define _ASM_POWERPC_PGTABLE_PPC32_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
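
/*
 * Illustrative arithmetic only (not used by the code): assuming 4KB pages
 * (PAGE_SHIFT = 12) and a PTE_SHIFT of 10 for 32-bit PTEs or 9 for 64-bit
 * PTEs (one page worth of PTEs in either case), the two layouts described
 * above fall out of the definitions like this:
 *
 *   32-bit PTEs: PGDIR_SHIFT = 22, PTRS_PER_PTE = 1024,
 *                PTRS_PER_PGD = 1024, PGD_TABLE_SIZE = 4KB (one page)
 *   64-bit PTEs: PGDIR_SHIFT = 21, PTRS_PER_PTE = 512,
 *                PTRS_PER_PGD = 2048, PGD_TABLE_SIZE = 8KB
 */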

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to
 * worry about clashes between our early ioremap() mappings, which grow
 * downward from ioremap_base, and the VM area allocations, which grow
 * upward from VMALLOC_START.  For this reason we keep ioremap_bot, so we
 * can tell when the VM system runs into the mappings set up during early
 * boot.  This really does become a problem on machines with large
 * amounts of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
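
/*
 * Worked example, purely illustrative (addresses are made up): with no
 * PPC_PIN_SIZE and high_memory == 0xd0000000 (e.g. 256MB of lowmem above
 * a 0xc0000000 kernel base), VMALLOC_START becomes
 * (0xd0000000 + 0x01000000) & ~0x00ffffff == 0xd1000000, i.e. high_memory
 * rounded up to the next 16MB boundary strictly above it.
 */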

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/pte-8xx.h>
#else /* CONFIG_6xx */
#include <asm/pte-hash32.h>
#endif

/* If _PAGE_SPECIAL is defined, then we advertise our support for it */
#ifdef _PAGE_SPECIAL
#define __HAVE_ARCH_PTE_SPECIAL
#endif

/*
 * Some bits are only used on some cpu families...  Make sure that all
 * the undefined ones get defined as 0.
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK	0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE	0
#endif
#ifndef _PAGE_HWEXEC
#define _PAGE_HWEXEC	0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif
#ifndef _PAGE_ENDIAN
#define _PAGE_ENDIAN	0
#endif
#ifndef _PAGE_COHERENT
#define _PAGE_COHERENT	0
#endif
#ifndef _PAGE_WRITETHRU
#define _PAGE_WRITETHRU	0
#endif
#ifndef _PAGE_SPECIAL
#define _PAGE_SPECIAL	0
#endif
#ifndef _PMD_PRESENT_MASK
#define _PMD_PRESENT_MASK	_PMD_PRESENT
#endif
#ifndef _PMD_SIZE
#define _PMD_SIZE	0
#define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
#endif

#define _PAGE_HPTEFLAGS	_PAGE_HASHPTE

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SPECIAL)

#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
			 _PAGE_USER | _PAGE_ACCESSED | \
			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
			 _PAGE_EXEC | _PAGE_HWEXEC)

/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
#else
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#endif
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)

#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
#define _PAGE_KERNEL	(_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
#define _PAGE_KERNEL_NC	(_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE)

#ifdef CONFIG_PPC_STD_MMU
/* On standard PPC MMU, no user access implies kernel read/write access,
 * so to write-protect kernel memory we must turn on user access */
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED | _PAGE_USER)
#else
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED)
#endif

#define _PAGE_IO	(_PAGE_KERNEL_NC | _PAGE_GUARDED)
#define _PAGE_RAM	(_PAGE_KERNEL | _PAGE_HWEXEC)

#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
	defined(CONFIG_KPROBES)
/* We want the debuggers to be able to set breakpoints anywhere, so
 * don't write protect the kernel text */
#define _PAGE_RAM_TEXT	_PAGE_RAM
#else
#define _PAGE_RAM_TEXT	(_PAGE_KERNEL_RO | _PAGE_HWEXEC)
#endif

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL	__pgprot(_PAGE_RAM)
#define PAGE_KERNEL_NOCACHE	__pgprot(_PAGE_IO)

/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X

#ifndef __ASSEMBLY__
/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
 * kernel without large page PMD support */
extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);

/*
 * Conversions between PTE values and page frame numbers.
 */

/* In some cases we want to additionally adjust where the pfn is in the pte to
 * allow room for more flags */
#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT + 8)
#else
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)
#endif

#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pfn_pte(pfn, prot)	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
					pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
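
/*
 * Illustrative only: with PAGE_SHIFT = 12 and the default
 * PFN_SHIFT_OFFSET == PAGE_SHIFT, pfn_pte(0x12345, prot) builds a PTE
 * value of (0x12345 << 12) | pgprot_val(prot) == 0x12345000 | flags,
 * and pte_pfn() recovers 0x12345 by shifting the flag bits back out.
 * On FSL_BOOKE with 64-bit PTEs the pfn moves up by a further 8 bits to
 * leave room for the extra flag bits, as noted above.
 */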
#endif /* __ASSEMBLY__ */

#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

#ifndef __ASSEMBLY__
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }

static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline pgprot_t pte_pgprot(pte_t pte)
{
	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
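
/*
 * For example (an illustrative use, not a new definition): changing a
 * mapping's protection with pte_modify(pte, PAGE_READONLY) keeps the
 * page frame number plus the _PAGE_ACCESSED/_PAGE_DIRTY/_PAGE_SPECIAL
 * bits (everything in _PAGE_CHG_MASK) and replaces the remaining bits
 * with those of the new pgprot.
 */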

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
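
/*
 * Usage sketch, taken from the helpers further down in this file: the
 * "clr" mask is removed first, then the "set" bits are ORed in, and the
 * old PTE value is returned.  So pte_update(ptep, _PAGE_ACCESSED, 0)
 * clears the accessed bit (atomically where PTE_ATOMIC_UPDATES requires
 * it), and pte_update(ptep, _PAGE_RW | _PAGE_HWWRITE, 0) write-protects
 * the page.
 */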

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}


static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
		 _PAGE_HWEXEC | _PAGE_EXEC);
	pte_update(ptep, 0, bits);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))

#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
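
/*
 * Hypothetical walk of a user address with these macros -- a hedged
 * sketch, not part of this header's API; pud_offset()/pmd_offset() come
 * from the folded levels provided via <asm-generic/pgtable-nopmd.h>
 * included above:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	... examine or modify *pte ...
 *	pte_unmap(pte);
 */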

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
 * _PAGE_HASHPTE bit (if used).  -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
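
/*
 * Worked example with illustrative numbers: __swp_entry(2, 0x100) gives
 * val == 2 | (0x100 << 5) == 0x2002 (type in the low 5 bits, offset above
 * them), and __swp_entry_to_pte() stores it as 0x2002 << 3 == 0x10010,
 * keeping the low three PTE bits clear for the flags mentioned in the
 * comment above (their exact positions are defined per-platform in the
 * pte-*.h headers).
 */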

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */