/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include "pgtable-nommu.h"

#else

#include <asm/memory.h>
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after
 * the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be
 * caught.  The vmalloc() routines leave a hole of 4kB between each
 * vmalloced area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif
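
/*
 * Worked example (illustrative; the high_memory value is an assumption):
 * with high_memory == 0xc8000000 and VMALLOC_OFFSET == 0x00800000,
 *
 *	VMALLOC_START = (0xc8000000 + 0x00800000) & ~0x007fffff
 *	              = 0xc8800000
 *
 * i.e. the vmalloc space starts on the next 8MB boundary above the
 * direct-mapped RAM, giving the guard hole described above.
 */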

/*
 * Hardware-wise, we have a two level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries.  Each entry
 * is one 32-bit word.  Most of the bits in the second level entry are used
 * by hardware, and there aren't any "accessed" and "dirty" bits.
 *
 * Linux on the other hand has a three level page table structure, which can
 * be wrapped to fit a two level page table structure easily - using the PGD
 * and PTE only.  However, Linux also expects one "PTE" table per page, and
 * at least a "dirty" bit.
 *
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.)  The second level contains two
 * hardware PTE tables arranged contiguously, preceded by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
 * This leads to the page tables having the following layout:
 *
 *    pgd             pte
 * |        |
 * +--------+
 * |        |       +------------+ +0
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +1024
 * +--------+ +0    | Linux pt 1 |
 * |        |-----> +------------+ +2048
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +3072
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
 * PTE_xxx for definitions of bits appearing in the "h/w pt".
 *
 * PMD_xxx definitions refer to bits in the first level page table.
 *
 * The "dirty" bit is emulated by only granting hardware write permission
 * iff the page is marked "writable" and "dirty" in the Linux PTE.  This
 * means that a write to a clean page will cause a permission fault, and
 * the Linux MM layer will mark the page dirty via handle_pte_fault().
 * For the hardware to notice the permission change, the TLB entry must
 * be flushed, and ptep_set_access_flags() does that for us.
 *
 * The "accessed" or "young" bit is emulated by a similar method; we only
 * allow accesses to the page if the "young" bit is set.  Accesses to the
 * page will cause a fault, and handle_pte_fault() will set the young bit
 * for us as long as the page is marked present in the corresponding Linux
 * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
 * up to date.
 *
 * However, when the "young" bit is cleared, we deny access to the page
 * by clearing the hardware PTE.  Currently Linux does not flush the TLB
 * for us in this case, which means the TLB will retain the translation
 * until either the TLB entry is evicted under pressure, or a context
 * switch which changes the user space mapping occurs.
 */
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		2048

#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))
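
/*
 * Layout arithmetic for the diagram above (illustrative; assumes the
 * usual 4-byte pte_t): PTE_HWTABLE_OFF = 512 * 4 = 2048 and
 * PTE_HWTABLE_SIZE = 512 * 4 = 2048, so the two Linux tables occupy
 * bytes 0..2047 of the page and the two hardware tables occupy bytes
 * 2048..4095 - exactly the 4096-byte page shown in the diagram.
 */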

/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT		21
#define PGDIR_SHIFT		21

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
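
/*
 * Worked example (illustrative): with PMD_SHIFT == PGDIR_SHIFT == 21,
 * PMD_SIZE == PGDIR_SIZE == 0x00200000 (2MB) and PMD_MASK ==
 * PGDIR_MASK == 0xffe00000.  2048 pgd entries of 2MB each cover the
 * full 4GB address space, matching PTRS_PER_PGD above.
 */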

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT		20
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~(SECTION_SIZE-1))

/*
 * ARMv6 supersection address mask and size definitions.
 */
#define SUPERSECTION_SHIFT	24
#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1))

/*
 * "Linux" PTE definitions.
 *
 * We keep two sets of PTEs - the hardware and the Linux version.
 * This allows greater flexibility in the way we map the Linux bits
 * onto the hardware tables, and allows us to have YOUNG and DIRTY
 * bits.
 *
 * The PTE table pointer refers to the hardware entries; the "Linux"
 * entries are stored 1024 bytes below.
 */
#define L_PTE_PRESENT		(_AT(pteval_t, 1) << 0)
#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 1)
#define L_PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !PRESENT */
#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 6)
#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)
#define L_PTE_USER		(_AT(pteval_t, 1) << 8)
#define L_PTE_XN		(_AT(pteval_t, 1) << 9)
#define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */

/*
 * These are the memory types, defined to be compatible with
 * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB
 */
#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */
#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 0x02) << 2)	/* 0010 */
#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 0x03) << 2)	/* 0011 */
#define L_PTE_MT_MINICACHE	(_AT(pteval_t, 0x06) << 2)	/* 0110 (sa1100, xscale) */
#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 0x07) << 2)	/* 0111 */
#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 0x04) << 2)	/* 0100 */
#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 0x0c) << 2)	/* 1100 */
#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 0x09) << 2)	/* 1001 */
#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */
#define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)
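
/*
 * Decoding sketch (illustrative): the memory type occupies bits 2..5
 * of a Linux PTE and is recovered by masking, e.g. a write-back
 * cacheable page satisfies
 *
 *	(pte_val(pte) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK
 */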

#ifndef __ASSEMBLY__

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	(L_PTE_PRESENT | L_PTE_YOUNG)

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
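
/*
 * Expansion sketch (illustrative): pgprot_writecombine(PAGE_KERNEL)
 * becomes
 *
 *	__pgprot((pgprot_val(PAGE_KERNEL) & ~L_PTE_MT_MASK) |
 *		 L_PTE_MT_BUFFERABLE)
 *
 * i.e. the existing memory-type field is replaced while all other
 * protection bits are preserved.
 */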

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
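
/*
 * Worked example (illustrative): a private PROT_READ|PROT_WRITE
 * mapping indexes the table as __P011 == __PAGE_COPY, so the page is
 * initially mapped read-only and the first write faults into the
 * copy-on-write path.  The same bits on a MAP_SHARED mapping give
 * __S011 == __PAGE_SHARED, which is genuinely writable.
 */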

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)


extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
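
/*
 * Worked example (illustrative): pgd_index(0xbf000000) ==
 * 0xbf000000 >> 21 == 0x5f8, so pgd_offset(mm, 0xbf000000) points at
 * the 8-byte entry (two hardware pointers) covering the 2MB region
 * 0xbf000000..0xbf1fffff.
 */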

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)
#define set_pgd(pgd,pgdp)	do { } while (0)
#define set_pud(pud,pudp)	do { } while (0)


/* Find an entry in the second-level page table. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		pmdpd[0] = pmdps[0];	\
		pmdpd[1] = pmdps[1];	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))

/* we don't need complex calculations here as the pmd is folded into the pgd */
#define pmd_addr_end(addr,end)	(end)

#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)
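
/*
 * Usage sketch (a minimal walk, illustrative only; pmd_none()/error
 * checking is omitted):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	pte_t entry = *pte;
 *	pte_unmap(pte);
 */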

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	if (addr >= TASK_SIZE)
		set_pte_ext(ptep, pteval, 0);
	else {
		__sync_icache_dcache(pteval);
		set_pte_ext(ptep, pteval, PTE_EXT_NG);
	}
}
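
/*
 * Usage sketch (illustrative; vma and page are assumed to come from
 * the caller):
 *
 *	set_pte_at(vma->vm_mm, addr, ptep,
 *		   mk_pte(page, vma->vm_page_prot));
 *
 * For addr < TASK_SIZE this takes the user path above: the I/D caches
 * are synchronised via __sync_icache_dcache() and the entry is written
 * with PTE_EXT_NG, so the resulting TLB entry is non-global (per-ASID).
 * Kernel mappings are written without PTE_EXT_NG and remain global.
 */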

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte)	(0)

#define pte_present_user(pte) \
	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
	 (L_PTE_PRESENT | L_PTE_USER))

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
PTE_BIT_FUNC(mkwrite,   &= ~L_PTE_RDONLY);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset --------------------> <- type --> 0 0 0
 *
 * This gives us up to 63 swap files and 32GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		6
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
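
/*
 * Worked example (illustrative): __swp_entry(2, 0x1234) stores
 * (2 << 3) | (0x1234 << 9) == 0x00246810.  __swp_type() recovers
 * (0x00246810 >> 3) & 0x3f == 2, and __swp_offset() recovers
 * 0x00246810 >> 9 == 0x1234.  Bits 0..2 remain zero, so the entry is
 * neither present nor a file PTE.
 */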

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry.  File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 3)
#define pgoff_to_pte(x)		__pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	29
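
/*
 * Worked example (illustrative): pgoff_to_pte(0x1234) stores
 * (0x1234 << 3) | L_PTE_FILE == 0x000091a4, and pte_to_pgoff()
 * recovers 0x000091a4 >> 3 == 0x1234.  With 29 offset bits and 4kB
 * pages this covers file offsets up to 2TB.
 */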

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
		remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
void identity_mapping_del(pgd_t *, unsigned long, unsigned long);

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */