/*
 * arch/arm/include/asm/pgtable.h
 *
 * Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include <asm-generic/4level-fixup.h>
#include <asm/pgtable-nommu.h>

#else

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <asm/tlbflush.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		0xff800000UL
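/*
 * Worked example (hypothetical high_memory value, 8MB VMALLOC_OFFSET):
 * with high_memory == 0xd0123000, VMALLOC_START is
 * (0xd0123000 + 0x800000) & ~0x7fffff == 0xd0800000, i.e. the vmalloc
 * area begins at the next 8MB boundary, leaving an unmapped hole of
 * roughly 7MB above the directly-mapped RAM.
 */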

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address at which we permit any user space
 * mapping to be placed.  This is particularly important for CPUs
 * without high vectors.
 */
#define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)
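/*
 * With 4 KiB pages this evaluates to 0x2000, keeping the two lowest
 * pages free of user mappings so that the exception vector pages can
 * live at virtual address 0 on CPUs without high vectors.
 */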

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the module area's pmd when LPAE is
 * enabled (the pmd page is shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING	TASK_SIZE
#endif

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits such as global/ASID and
 * SMP shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;
extern pgprot_t		pgprot_hyp_device;
extern pgprot_t		pgprot_s2;
extern pgprot_t		pgprot_s2_device;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))
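/*
 * For example, PAGE_KERNEL below expands to
 * __pgprot(pgprot_val(pgprot_kernel) | L_PTE_XN): the runtime-fixed-up
 * kernel protection with the execute-never bit added on top.
 */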

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel
#define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN)
#define PAGE_HYP_EXEC		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
#define PAGE_HYP_RO		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
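/*
 * __pgprot_modify() replaces one field wholesale: for example,
 * pgprot_writecombine(PAGE_KERNEL) clears the entire L_PTE_MT_MASK
 * memory-type field and substitutes L_PTE_MT_BUFFERABLE, leaving all
 * permission bits untouched.
 */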

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) Write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
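/*
 * The three index digits are, left to right, the PROT_EXEC, PROT_WRITE
 * and PROT_READ bits of the mmap() protection; __P* entries serve
 * MAP_PRIVATE (copy-on-write) mappings, __S* entries MAP_SHARED ones.
 * For example, mmap(PROT_READ | PROT_WRITE, MAP_PRIVATE) selects
 * __P011, i.e. __PAGE_COPY.
 */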

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
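/*
 * Worked example, assuming the classic 2-level layout where
 * PGDIR_SHIFT is 21: pgd_index(0xc0123456) == 0xc0123456 >> 21 ==
 * 0x600, so pgd_offset_k(0xc0123456) is &init_mm.pgd[0x600].  Under
 * LPAE the shift, and hence the index, differs.
 */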

#define pmd_none(pmd)		(!pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#define pte_isset(pte, val)	((u32)(val) == (val) ? pte_val(pte) & (val) \
						: !!(pte_val(pte) & (val)))
#define pte_isclear(pte, val)	(!(pte_val(pte) & (val)))
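/*
 * The (u32) test above matters for LPAE, where pteval_t is 64-bit: a
 * flag that fits in 32 bits can be returned as a raw AND, while a
 * wider one is normalised with !! so that no set bit is lost if the
 * caller truncates the result to a 32-bit type.
 */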

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_isset((pte), L_PTE_PRESENT))
#define pte_valid(pte)		(pte_isset((pte), L_PTE_VALID))
#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte)		(pte_isclear((pte), L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_isset((pte), L_PTE_DIRTY))
#define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
#define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))

#define pte_valid_user(pte)	\
	(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))

static inline bool pte_access_permitted(pte_t pte, bool write)
{
	pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
	pteval_t needed = mask;

	if (write)
		mask |= L_PTE_RDONLY;

	return (pte_val(pte) & mask) == needed;
}
#define pte_access_permitted pte_access_permitted
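/*
 * L_PTE_RDONLY set means "read only", so for a write check the bit is
 * added to the mask but not to the needed bits: the test then only
 * passes when RDONLY is clear, i.e. when the pte is writable.
 */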

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

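/*
 * User mappings (addr < TASK_SIZE) are installed with the non-global
 * bit so the TLB entry is tagged with the current ASID; unless the pte
 * is special, the I-cache is also synchronised with the D-cache first.
 */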
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	unsigned long ext = 0;

	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
		if (!pte_special(pteval))
			__sync_icache_dcache(pteval);
		ext |= PTE_EXT_NG;
	}

	set_pte_ext(ptep, pteval, ext);
}

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_mknexec(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
		L_PTE_NONE | L_PTE_VALID;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
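/*
 * Note that pte_modify() only takes the protection-related bits (XN,
 * RDONLY, USER, NONE, VALID) from the new pgprot; the memory-type
 * field and the dirty/young bits are preserved from the existing pte.
 */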

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ------------------------> < type -> 0 0
 *
 * This gives us up to 31 swap files and 128GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
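/*
 * Worked example: __swp_entry(1, 0x100) gives
 * (1 << 2) | (0x100 << 7) == 0x8004; __swp_type() recovers
 * (0x8004 >> 2) & 0x1f == 1 and __swp_offset() 0x8004 >> 7 == 0x100.
 * Bits 1:0 stay zero, so a swap entry never looks like a present pte.
 */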

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */