/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a multi-level page table setup
 * with pgd, pud, pmd and pte. For s390 we use up to four of the five
 * levels the hardware provides (region first tables are not used), so
 * upper levels are folded away for smaller address spaces.
 *
 * The "pgd_xxx()" functions are trivial for a folded setup: the pgd is
 * never bad, and a pud/pmd always exists (it is folded into the
 * higher-level entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
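
/*
 * Illustrative note (not from the original file, sizes made up): with,
 * say, a 256KB zero page area, zero_page_mask would be 0x3f000 and
 * ZERO_PAGE(vaddr) would pick one of 64 zero pages based on the virtual
 * address, so that read-only zero mappings match the cache color of the
 * address they are mapped at.
 */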

/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PUD_SHIFT determines the size of the area a third-level page
 * table can map
 * PGDIR_SHIFT determines what a fourth-level page table entry can map
 */
#define PMD_SHIFT	20
#define PUD_SHIFT	31
#define PGDIR_SHIFT	42

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the s390 64 bit hardware uses
 * region, segment and page tables. A page table has 256 entries and
 * maps 1 MB (one segment); segment and region tables have 2048
 * entries each.
 */
#define PTRS_PER_PTE	256
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0UL
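
/*
 * Arithmetic sanity check (illustrative, not from the original file):
 * 256 ptes * 4 KB = 1 MB per page table, matching PMD_SHIFT = 20;
 * 2048 segments * 1 MB = 2 GB per segment table, matching PUD_SHIFT = 31;
 * 2048 * 2 GB = 4 TB per region-third table, matching PGDIR_SHIFT = 42.
 */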

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}
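
/*
 * Usage sketch (illustrative, not from the original file): callers test
 * any kernel pointer, e.g. "if (is_module_addr(ptr))", to learn whether
 * it lies within [MODULES_VADDR, MODULES_END]. Note that both boundary
 * addresses themselves count as module addresses here.
 */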

/*
 * A 31 bit page table entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_TYPE	0x002		/* SW pte type bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte:
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR...wrdytp
 * empty			.10...000000
 * swap				.10...xxxx10
 * file				.11...xxxxx0
 * prot-none, clean, old	.11...000001
 * prot-none, clean, young	.11...000101
 * prot-none, dirty, old	.10...001001
 * prot-none, dirty, young	.10...001101
 * read-only, clean, old	.11...010001
 * read-only, clean, young	.01...010101
 * read-only, dirty, old	.11...011001
 * read-only, dirty, young	.01...011101
 * read-write, clean, old	.11...110001
 * read-write, clean, young	.01...110101
 * read-write, dirty, old	.10...111001
 * read-write, dirty, young	.00...111101
 *
 * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
 * pte_none    is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
 * pte_swap    is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
 */
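
/*
 * Worked example (illustrative, not from the original file): a
 * "read-write, dirty, young" pte has the software bits w, r, d, y and p
 * set and both hardware bits clear (.00...111101). It is present
 * ((pte & 0x001) == 0x001), neither none nor swap, and the hardware can
 * both fetch and store through it without faulting.
 */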

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_SPLIT	0x0800	/* THP splitting bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_READ	0x0002	/* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE	0x0001	/* SW segment write bit */

/*
 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 */

#define _SEGMENT_ENTRY_SPLIT_BIT 11	/* THP splitting bit number */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys, faults should no longer
 * be backed by zero pages.
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x603) == 0x402 */
	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
				_PAGE_TYPE | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_TYPE);
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
	preempt_enable();
#endif
}
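
/*
 * Typical usage pattern (illustrative, not from the original file), as
 * in set_pte_at() below:
 *
 *	pgste = pgste_get_lock(ptep);
 *	... inspect or modify *ptep and the pgste ...
 *	pgste_set_unlock(ptep, pgste);
 *
 * The PCL bit of the pgste, which lives PTRS_PER_PTE entries behind the
 * pte itself, acts as a per-pte spinlock that serializes updates against
 * the virtualization code.
 */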

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
	*ptep = entry;
	return pgste;
}

/**
 * struct gmap - guest address space
 * @list: list head for the per-mm list of gmaps
 * @crst_list: list of all crst tables used in the guest address space
 * @mm: pointer to the parent mm_struct
 * @guest_to_host: radix tree with guest to host address translation
 * @host_to_guest: radix tree with pointer to segment table entries
 * @guest_table_lock: spinlock to protect all entries in the guest page table
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @asce_end: end address covered by the gmap address space
 * @private: private (KVM) data attached to this gmap
 * @pfault_enabled: defines if pfaults are applicable for the guest
 */
struct gmap {
	struct list_head list;
	struct list_head crst_list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
};

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @list: list head for the notifier list
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
};

struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);
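
/*
 * Minimal usage sketch (illustrative only, not from the original file;
 * error handling omitted and the limit and addresses are made up): a
 * hypervisor creates a guest address space and backs a range of guest
 * addresses with host memory of the current process roughly like this:
 *
 *	gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
 *	gmap_map_segment(gmap, host_from, guest_to, len);
 *	gmap_enable(gmap);
 *	...
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */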

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);

static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & PGSTE_IN_BIT) {
		pgste_val(pgste) &= ~PGSTE_IN_BIT;
		gmap_do_ipte_notify(mm, addr, ptep);
	}
#endif
	return pgste;
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
		pgste_set_key(ptep, pgste, entry, mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = entry;
	}
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

| 864 | /* |
| 865 | * The following pte modification functions only work if |
| 866 | * pte_present() is true; the behaviour is undefined otherwise.
| 867 | */ |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 868 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 869 | { |
Nick Piggin | 138c902 | 2008-07-08 11:31:06 +0200 | [diff] [blame] | 870 | pte_val(pte) &= _PAGE_CHG_MASK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 871 | pte_val(pte) |= pgprot_val(newprot); |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 872 | /* |
| 873 | * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the |
| 874 | * invalid bit set; clear it again for readable, young pages
| 875 | */ |
| 876 | if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ)) |
| 877 | pte_val(pte) &= ~_PAGE_INVALID; |
| 878 | /* |
| 879 | * newprot for PAGE_READ and PAGE_WRITE has the page protection |
| 880 | * bit set; clear it again for writable, dirty pages
| 881 | */ |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 882 | if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE)) |
| 883 | pte_val(pte) &= ~_PAGE_PROTECT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 884 | return pte; |
| 885 | } |
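/*
 * Sketch of the mprotect-style sequence that pte_modify() serves (cf.
 * change_pte_range() in mm/mprotect.c; names assumed):
 *
 *	pte_t pte = ptep_get_and_clear(mm, addr, ptep);
 *	pte = pte_modify(pte, newprot);
 *	set_pte_at(mm, addr, ptep, pte);
 *
 * The young/dirty checks above re-derive the hardware invalid and
 * protect bits that the generic protection value cannot express.
 */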
| 886 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 887 | static inline pte_t pte_wrprotect(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 888 | { |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 889 | pte_val(pte) &= ~_PAGE_WRITE; |
| 890 | pte_val(pte) |= _PAGE_PROTECT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 891 | return pte; |
| 892 | } |
| 893 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 894 | static inline pte_t pte_mkwrite(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 895 | { |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 896 | pte_val(pte) |= _PAGE_WRITE; |
| 897 | if (pte_val(pte) & _PAGE_DIRTY) |
| 898 | pte_val(pte) &= ~_PAGE_PROTECT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 899 | return pte; |
| 900 | } |
| 901 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 902 | static inline pte_t pte_mkclean(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 903 | { |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 904 | pte_val(pte) &= ~_PAGE_DIRTY; |
| 905 | pte_val(pte) |= _PAGE_PROTECT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 906 | return pte; |
| 907 | } |
| 908 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 909 | static inline pte_t pte_mkdirty(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 910 | { |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 911 | pte_val(pte) |= _PAGE_DIRTY; |
| 912 | if (pte_val(pte) & _PAGE_WRITE) |
| 913 | pte_val(pte) &= ~_PAGE_PROTECT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 914 | return pte; |
| 915 | } |
| 916 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 917 | static inline pte_t pte_mkold(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 918 | { |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 919 | pte_val(pte) &= ~_PAGE_YOUNG; |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 920 | pte_val(pte) |= _PAGE_INVALID; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 921 | return pte; |
| 922 | } |
| 923 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 924 | static inline pte_t pte_mkyoung(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 925 | { |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 926 | pte_val(pte) |= _PAGE_YOUNG; |
| 927 | if (pte_val(pte) & _PAGE_READ) |
| 928 | pte_val(pte) &= ~_PAGE_INVALID; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 929 | return pte; |
| 930 | } |
| 931 | |
Nick Piggin | 7e67513 | 2008-04-28 02:13:00 -0700 | [diff] [blame] | 932 | static inline pte_t pte_mkspecial(pte_t pte) |
| 933 | { |
Nick Piggin | a08cb62 | 2008-04-28 02:13:03 -0700 | [diff] [blame] | 934 | pte_val(pte) |= _PAGE_SPECIAL; |
Nick Piggin | 7e67513 | 2008-04-28 02:13:00 -0700 | [diff] [blame] | 935 | return pte; |
| 936 | } |
| 937 | |
Heiko Carstens | 84afdce | 2010-10-25 16:10:36 +0200 | [diff] [blame] | 938 | #ifdef CONFIG_HUGETLB_PAGE |
| 939 | static inline pte_t pte_mkhuge(pte_t pte) |
| 940 | { |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 941 | pte_val(pte) |= _PAGE_LARGE; |
Heiko Carstens | 84afdce | 2010-10-25 16:10:36 +0200 | [diff] [blame] | 942 | return pte; |
| 943 | } |
| 944 | #endif |
| 945 | |
Gerald Schaefer | 9282ed9 | 2006-09-20 15:59:37 +0200 | [diff] [blame] | 946 | static inline void __ptep_ipte(unsigned long address, pte_t *ptep) |
| 947 | { |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 948 | unsigned long pto = (unsigned long) ptep; |
| 949 | |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 950 | /* Invalidation + global TLB flush for the pte */ |
| 951 | asm volatile( |
| 952 | " ipte %2,%3" |
| 953 | : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address)); |
| 954 | } |
| 955 | |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 956 | static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep) |
| 957 | { |
| 958 | unsigned long pto = (unsigned long) ptep; |
| 959 | |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 960 | /* Invalidation + local TLB flush for the pte */ |
| 961 | asm volatile( |
| 962 | " .insn rrf,0xb2210000,%2,%3,0,1" |
| 963 | : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address)); |
| 964 | } |
| 965 | |
Heiko Carstens | cfb0b24 | 2014-09-23 21:29:20 +0200 | [diff] [blame] | 966 | static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep) |
| 967 | { |
| 968 | unsigned long pto = (unsigned long) ptep; |
| 969 | |
Heiko Carstens | cfb0b24 | 2014-09-23 21:29:20 +0200 | [diff] [blame] | 970 | /* Invalidate a range of ptes + global TLB flush of the ptes */ |
| 971 | do { |
| 972 | asm volatile( |
| 973 | " .insn rrf,0xb2210000,%2,%0,%1,0" |
| 974 | : "+a" (address), "+a" (nr) : "a" (pto) : "memory"); |
| 975 | } while (nr != 255); |
| 976 | } |
| 977 | |
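/*
 * Note on the flush helpers below (a reading of the code, not an
 * architectural statement): mm->context.attach_count keeps the number
 * of attached CPUs in its lower 16 bits, and every flusher in progress
 * adds 0x10000 to the upper half. If no CPU except (at most) the
 * current one has the mm attached, ptep_flush_direct() may use the
 * local-TLB IPTE variant and ptep_flush_lazy() may skip the flush
 * entirely, marking the pte invalid and setting mm->context.flush_mm
 * instead.
 */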
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 978 | static inline void ptep_flush_direct(struct mm_struct *mm, |
| 979 | unsigned long address, pte_t *ptep) |
| 980 | { |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 981 | int active, count; |
| 982 | |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 983 | if (pte_val(*ptep) & _PAGE_INVALID) |
| 984 | return; |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 985 | active = (mm == current->active_mm) ? 1 : 0; |
| 986 | count = atomic_add_return(0x10000, &mm->context.attach_count); |
| 987 | if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active && |
| 988 | cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) |
| 989 | __ptep_ipte_local(address, ptep); |
| 990 | else |
| 991 | __ptep_ipte(address, ptep); |
| 992 | atomic_sub(0x10000, &mm->context.attach_count); |
Gerald Schaefer | 9282ed9 | 2006-09-20 15:59:37 +0200 | [diff] [blame] | 993 | } |
| 994 | |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 995 | static inline void ptep_flush_lazy(struct mm_struct *mm, |
| 996 | unsigned long address, pte_t *ptep) |
| 997 | { |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 998 | int active, count; |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 999 | |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1000 | if (pte_val(*ptep) & _PAGE_INVALID) |
| 1001 | return; |
| 1002 | active = (mm == current->active_mm) ? 1 : 0; |
| 1003 | count = atomic_add_return(0x10000, &mm->context.attach_count); |
| 1004 | if ((count & 0xffff) <= active) { |
| 1005 | pte_val(*ptep) |= _PAGE_INVALID; |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 1006 | mm->context.flush_mm = 1; |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1007 | } else |
| 1008 | __ptep_ipte(address, ptep); |
| 1009 | atomic_sub(0x10000, &mm->context.attach_count); |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 1010 | } |
| 1011 | |
Martin Schwidefsky | 0a61b22 | 2013-10-18 12:03:41 +0200 | [diff] [blame] | 1012 | /* |
| 1013 | * Get (and clear) the user dirty bit for a pte. |
| 1014 | */ |
| 1015 | static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm, |
| 1016 | unsigned long addr, |
| 1017 | pte_t *ptep) |
| 1018 | { |
| 1019 | pgste_t pgste; |
| 1020 | pte_t pte; |
| 1021 | int dirty; |
| 1022 | |
| 1023 | if (!mm_has_pgste(mm)) |
| 1024 | return 0; |
| 1025 | pgste = pgste_get_lock(ptep); |
| 1026 | dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT); |
| 1027 | pgste_val(pgste) &= ~PGSTE_UC_BIT; |
| 1028 | pte = *ptep; |
| 1029 | if (dirty && (pte_val(pte) & _PAGE_PRESENT)) { |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1030 | pgste = pgste_ipte_notify(mm, addr, ptep, pgste); |
Martin Schwidefsky | 0a61b22 | 2013-10-18 12:03:41 +0200 | [diff] [blame] | 1031 | __ptep_ipte(addr, ptep); |
| 1032 | if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE)) |
| 1033 | pte_val(pte) |= _PAGE_PROTECT; |
| 1034 | else |
| 1035 | pte_val(pte) |= _PAGE_INVALID; |
| 1036 | *ptep = pte; |
| 1037 | } |
| 1038 | pgste_set_unlock(ptep, pgste); |
| 1039 | return dirty; |
| 1040 | } |
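/*
 * Sketch: the user-dirty bit feeds software dirty tracking for pgste
 * users such as KVM; a caller collecting dirty pages might do
 * (mark_addr_dirty() is a hypothetical helper):
 *
 *	if (ptep_test_and_clear_user_dirty(mm, addr, ptep))
 *		mark_addr_dirty(addr);
 */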
| 1041 | |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1042 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
| 1043 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, |
| 1044 | unsigned long addr, pte_t *ptep) |
| 1045 | { |
| 1046 | pgste_t pgste; |
Christian Borntraeger | 3e03d4c | 2014-08-28 21:21:41 +0200 | [diff] [blame] | 1047 | pte_t pte, oldpte; |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1048 | int young; |
| 1049 | |
| 1050 | if (mm_has_pgste(vma->vm_mm)) { |
| 1051 | pgste = pgste_get_lock(ptep); |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1052 | pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste); |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1053 | } |
| 1054 | |
Christian Borntraeger | 3e03d4c | 2014-08-28 21:21:41 +0200 | [diff] [blame] | 1055 | oldpte = pte = *ptep; |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1056 | ptep_flush_direct(vma->vm_mm, addr, ptep); |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1057 | young = pte_young(pte); |
| 1058 | pte = pte_mkold(pte); |
| 1059 | |
| 1060 | if (mm_has_pgste(vma->vm_mm)) { |
Christian Borntraeger | 3e03d4c | 2014-08-28 21:21:41 +0200 | [diff] [blame] | 1061 | pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm); |
Martin Schwidefsky | 0a61b22 | 2013-10-18 12:03:41 +0200 | [diff] [blame] | 1062 | pgste = pgste_set_pte(ptep, pgste, pte); |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1063 | pgste_set_unlock(ptep, pgste); |
| 1064 | } else |
| 1065 | *ptep = pte; |
| 1066 | |
| 1067 | return young; |
| 1068 | } |
| 1069 | |
| 1070 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH |
| 1071 | static inline int ptep_clear_flush_young(struct vm_area_struct *vma, |
| 1072 | unsigned long address, pte_t *ptep) |
| 1073 | { |
| 1074 | return ptep_test_and_clear_young(vma, address, ptep); |
| 1075 | } |
| 1076 | |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1077 | /* |
| 1078 | * This is hard to understand. ptep_get_and_clear and ptep_clear_flush |
| 1079 | * both clear the TLB for the unmapped pte. The reason is that |
| 1080 | * ptep_get_and_clear is used in common code (e.g. change_pte_range) |
| 1081 | * to modify an active pte. The sequence is |
| 1082 | * 1) ptep_get_and_clear |
| 1083 | * 2) set_pte_at |
| 1084 | * 3) flush_tlb_range |
| 1085 | * On s390 the TLB needs to be flushed together with the modification of
| 1086 | * the pte if the pte is active. The only way to implement this is to
| 1087 | * have ptep_get_and_clear do the TLB flush. In exchange, flush_tlb_range
| 1088 | * is a nop. |
| 1089 | */ |
| 1090 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1091 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, |
| 1092 | unsigned long address, pte_t *ptep) |
| 1093 | { |
| 1094 | pgste_t pgste; |
| 1095 | pte_t pte; |
| 1096 | |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1097 | if (mm_has_pgste(mm)) { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1098 | pgste = pgste_get_lock(ptep); |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1099 | pgste = pgste_ipte_notify(mm, address, ptep, pgste); |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1100 | } |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1101 | |
| 1102 | pte = *ptep; |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 1103 | ptep_flush_lazy(mm, address, ptep); |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 1104 | pte_val(*ptep) = _PAGE_INVALID; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1105 | |
| 1106 | if (mm_has_pgste(mm)) { |
Dominik Dingel | 65eef335 | 2014-01-14 15:02:11 +0100 | [diff] [blame] | 1107 | pgste = pgste_update_all(&pte, pgste, mm); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1108 | pgste_set_unlock(ptep, pgste); |
| 1109 | } |
| 1110 | return pte; |
| 1111 | } |
| 1112 | |
| 1113 | #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION |
| 1114 | static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, |
| 1115 | unsigned long address, |
| 1116 | pte_t *ptep) |
| 1117 | { |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1118 | pgste_t pgste; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1119 | pte_t pte; |
| 1120 | |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1121 | if (mm_has_pgste(mm)) { |
| 1122 | pgste = pgste_get_lock(ptep); |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1123 | pgste_ipte_notify(mm, address, ptep, pgste); |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1124 | } |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1125 | |
| 1126 | pte = *ptep; |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 1127 | ptep_flush_lazy(mm, address, ptep); |
Christian Borntraeger | b56433c | 2013-05-27 16:19:55 +0200 | [diff] [blame] | 1128 | |
Christian Borntraeger | 3a82603 | 2013-06-05 09:22:33 +0200 | [diff] [blame] | 1129 | if (mm_has_pgste(mm)) { |
Dominik Dingel | 65eef335 | 2014-01-14 15:02:11 +0100 | [diff] [blame] | 1130 | pgste = pgste_update_all(&pte, pgste, mm); |
Christian Borntraeger | 3a82603 | 2013-06-05 09:22:33 +0200 | [diff] [blame] | 1131 | pgste_set(ptep, pgste); |
| 1132 | } |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1133 | return pte; |
| 1134 | } |
| 1135 | |
| 1136 | static inline void ptep_modify_prot_commit(struct mm_struct *mm, |
| 1137 | unsigned long address, |
| 1138 | pte_t *ptep, pte_t pte) |
| 1139 | { |
Christian Borntraeger | b56433c | 2013-05-27 16:19:55 +0200 | [diff] [blame] | 1140 | pgste_t pgste; |
| 1141 | |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1142 | if (mm_has_pgste(mm)) { |
Martin Schwidefsky | d56c893 | 2013-07-19 11:15:54 +0200 | [diff] [blame] | 1143 | pgste = pgste_get(ptep); |
Dominik Dingel | 65eef335 | 2014-01-14 15:02:11 +0100 | [diff] [blame] | 1144 | pgste_set_key(ptep, pgste, pte, mm); |
Martin Schwidefsky | 0a61b22 | 2013-10-18 12:03:41 +0200 | [diff] [blame] | 1145 | pgste = pgste_set_pte(ptep, pgste, pte); |
Christian Borntraeger | b56433c | 2013-05-27 16:19:55 +0200 | [diff] [blame] | 1146 | pgste_set_unlock(ptep, pgste); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1147 | } else |
| 1148 | *ptep = pte; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1149 | } |
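/*
 * The modify_prot transaction is used pairwise; a sketch of the generic
 * pattern (see change_pte_range() in mm/mprotect.c; names assumed):
 *
 *	pte_t pte = ptep_modify_prot_start(mm, addr, ptep);
 *	pte = pte_modify(pte, newprot);
 *	ptep_modify_prot_commit(mm, addr, ptep, pte);
 *
 * Between start and commit the pte is invalid, so the pgste lock taken
 * in start can be held until commit.
 */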
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1150 | |
| 1151 | #define __HAVE_ARCH_PTEP_CLEAR_FLUSH |
Martin Schwidefsky | f0e47c2 | 2007-07-17 04:03:03 -0700 | [diff] [blame] | 1152 | static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, |
| 1153 | unsigned long address, pte_t *ptep) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1154 | { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1155 | pgste_t pgste; |
| 1156 | pte_t pte; |
| 1157 | |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1158 | if (mm_has_pgste(vma->vm_mm)) { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1159 | pgste = pgste_get_lock(ptep); |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1160 | pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste); |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1161 | } |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1162 | |
| 1163 | pte = *ptep; |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1164 | ptep_flush_direct(vma->vm_mm, address, ptep); |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 1165 | pte_val(*ptep) = _PAGE_INVALID; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1166 | |
| 1167 | if (mm_has_pgste(vma->vm_mm)) { |
Konstantin Weitz | b31288f | 2013-04-17 17:36:29 +0200 | [diff] [blame] | 1168 | if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) == |
| 1169 | _PGSTE_GPS_USAGE_UNUSED) |
| 1170 | pte_val(pte) |= _PAGE_UNUSED; |
Dominik Dingel | 65eef335 | 2014-01-14 15:02:11 +0100 | [diff] [blame] | 1171 | pgste = pgste_update_all(&pte, pgste, vma->vm_mm); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1172 | pgste_set_unlock(ptep, pgste); |
| 1173 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1174 | return pte; |
| 1175 | } |
| 1176 | |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1177 | /* |
| 1178 | * The batched pte unmap code uses ptep_get_and_clear_full to clear the |
| 1179 | * ptes. Here an optimization is possible. tlb_gather_mmu flushes all |
| 1180 | * tlbs of an mm if it can guarantee that the ptes of the mm_struct |
| 1181 | * cannot be accessed while the batched unmap is running. In this case |
| 1182 | * full==1 and a simple pte_clear is enough. See tlb.h. |
| 1183 | */ |
| 1184 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL |
| 1185 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1186 | unsigned long address, |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1187 | pte_t *ptep, int full) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1188 | { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1189 | pgste_t pgste; |
| 1190 | pte_t pte; |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1191 | |
Martin Schwidefsky | a055f66 | 2013-07-19 10:31:55 +0200 | [diff] [blame] | 1192 | if (!full && mm_has_pgste(mm)) { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1193 | pgste = pgste_get_lock(ptep); |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1194 | pgste = pgste_ipte_notify(mm, address, ptep, pgste); |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1195 | } |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1196 | |
| 1197 | pte = *ptep; |
| 1198 | if (!full) |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 1199 | ptep_flush_lazy(mm, address, ptep); |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 1200 | pte_val(*ptep) = _PAGE_INVALID; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1201 | |
Martin Schwidefsky | a055f66 | 2013-07-19 10:31:55 +0200 | [diff] [blame] | 1202 | if (!full && mm_has_pgste(mm)) { |
Dominik Dingel | 65eef335 | 2014-01-14 15:02:11 +0100 | [diff] [blame] | 1203 | pgste = pgste_update_all(&pte, pgste, mm); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1204 | pgste_set_unlock(ptep, pgste); |
| 1205 | } |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1206 | return pte; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1207 | } |
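/*
 * Sketch of the batched unmap case described above, using the generic
 * mmu_gather API (usage assumed, cf. mm/memory.c):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	... ptep_get_and_clear_full(mm, addr, ptep, tlb.fullmm); ...
 *	tlb_finish_mmu(&tlb, start, end);
 */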
| 1208 | |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1209 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1210 | static inline pte_t ptep_set_wrprotect(struct mm_struct *mm, |
| 1211 | unsigned long address, pte_t *ptep) |
| 1212 | { |
| 1213 | pgste_t pgste; |
| 1214 | pte_t pte = *ptep; |
| 1215 | |
| 1216 | if (pte_write(pte)) { |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1217 | if (mm_has_pgste(mm)) { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1218 | pgste = pgste_get_lock(ptep); |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1219 | pgste = pgste_ipte_notify(mm, address, ptep, pgste); |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1220 | } |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1221 | |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 1222 | ptep_flush_lazy(mm, address, ptep); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1223 | pte = pte_wrprotect(pte); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1224 | |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1225 | if (mm_has_pgste(mm)) { |
Martin Schwidefsky | 0a61b22 | 2013-10-18 12:03:41 +0200 | [diff] [blame] | 1226 | pgste = pgste_set_pte(ptep, pgste, pte); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1227 | pgste_set_unlock(ptep, pgste); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1228 | } else |
| 1229 | *ptep = pte; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1230 | } |
| 1231 | return pte; |
| 1232 | } |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1233 | |
| 1234 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1235 | static inline int ptep_set_access_flags(struct vm_area_struct *vma, |
| 1236 | unsigned long address, pte_t *ptep, |
| 1237 | pte_t entry, int dirty) |
| 1238 | { |
| 1239 | pgste_t pgste; |
| 1240 | |
| 1241 | if (pte_same(*ptep, entry)) |
| 1242 | return 0; |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1243 | if (mm_has_pgste(vma->vm_mm)) { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1244 | pgste = pgste_get_lock(ptep); |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1245 | pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste); |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1246 | } |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1247 | |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1248 | ptep_flush_direct(vma->vm_mm, address, ptep); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1249 | |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1250 | if (mm_has_pgste(vma->vm_mm)) { |
Christian Borntraeger | 1951497 | 2014-08-28 23:44:57 +0200 | [diff] [blame] | 1251 | pgste_set_key(ptep, pgste, entry, vma->vm_mm); |
Martin Schwidefsky | 0a61b22 | 2013-10-18 12:03:41 +0200 | [diff] [blame] | 1252 | pgste = pgste_set_pte(ptep, pgste, entry); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1253 | pgste_set_unlock(ptep, pgste); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1254 | } else |
| 1255 | *ptep = entry; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1256 | return 1; |
| 1257 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1258 | |
| 1259 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1260 | * Conversion functions: convert a page and protection to a page entry, |
| 1261 | * and a page entry and page directory to the page they refer to. |
| 1262 | */ |
| 1263 | static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) |
| 1264 | { |
| 1265 | pte_t __pte; |
| 1266 | pte_val(__pte) = physpage + pgprot_val(pgprot); |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1267 | return pte_mkyoung(__pte); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1268 | } |
| 1269 | |
Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 1270 | static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) |
| 1271 | { |
Heiko Carstens | 0b2b6e1d | 2006-10-04 20:02:23 +0200 | [diff] [blame] | 1272 | unsigned long physpage = page_to_phys(page); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1273 | pte_t __pte = mk_pte_phys(physpage, pgprot); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1274 | |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 1275 | if (pte_write(__pte) && PageDirty(page)) |
| 1276 | __pte = pte_mkdirty(__pte); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1277 | return __pte; |
Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 1278 | } |
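/*
 * Sketch: the conversion helpers invert each other ("page" assumed):
 *
 *	pte_t pte = mk_pte(page, PAGE_READ);
 *
 *	pte_pfn(pte) == page_to_pfn(page);
 *	pte_page(pte) == page;
 */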
| 1279 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1280 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1281 | #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) |
| 1282 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) |
| 1283 | #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1284 | |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1285 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1286 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) |
| 1287 | |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1288 | #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) |
| 1289 | #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) |
Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 1290 | #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1291 | |
Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 1292 | static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) |
| 1293 | { |
Martin Schwidefsky | 6252d70 | 2008-02-09 18:24:37 +0100 | [diff] [blame] | 1294 | pud_t *pud = (pud_t *) pgd; |
| 1295 | if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) |
| 1296 | pud = (pud_t *) pgd_deref(*pgd); |
Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 1297 | return pud + pud_index(address); |
| 1298 | } |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1299 | |
| 1300 | static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) |
| 1301 | { |
Martin Schwidefsky | 6252d70 | 2008-02-09 18:24:37 +0100 | [diff] [blame] | 1302 | pmd_t *pmd = (pmd_t *) pud; |
| 1303 | if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) |
| 1304 | pmd = (pmd_t *) pud_deref(*pud); |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1305 | return pmd + pmd_index(address); |
| 1306 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1307 | |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1308 | #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) |
| 1309 | #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) |
| 1310 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
| 1311 | |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1312 | #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1313 | |
| 1314 | /* Find an entry in the lowest level page table.. */ |
| 1315 | #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr)) |
| 1316 | #define pte_offset_kernel(pmd, address) pte_offset(pmd,address) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1317 | #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1318 | #define pte_unmap(pte) do { } while (0) |
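/*
 * Putting the lookup macros together, a software walk from an mm to a
 * pte pointer looks like this (sketch, no locking or presence checks):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...
 *	pte_unmap(pte);
 */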
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1319 | |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1320 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1321 | static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) |
| 1322 | { |
Gerald Schaefer | d8e7a33 | 2012-10-25 17:42:50 +0200 | [diff] [blame] | 1323 | /* |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 1324 | * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx) |
Gerald Schaefer | d8e7a33 | 2012-10-25 17:42:50 +0200 | [diff] [blame] | 1325 | * Convert to segment table entry format. |
| 1326 | */ |
| 1327 | if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE)) |
| 1328 | return pgprot_val(SEGMENT_NONE); |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 1329 | if (pgprot_val(pgprot) == pgprot_val(PAGE_READ)) |
| 1330 | return pgprot_val(SEGMENT_READ); |
| 1331 | return pgprot_val(SEGMENT_WRITE); |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1332 | } |
| 1333 | |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1334 | static inline pmd_t pmd_wrprotect(pmd_t pmd) |
| 1335 | { |
| 1336 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE; |
| 1337 | pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; |
| 1338 | return pmd; |
| 1339 | } |
| 1340 | |
| 1341 | static inline pmd_t pmd_mkwrite(pmd_t pmd) |
| 1342 | { |
| 1343 | pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE; |
| 1344 | if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) |
| 1345 | return pmd; |
| 1346 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; |
| 1347 | return pmd; |
| 1348 | } |
| 1349 | |
| 1350 | static inline pmd_t pmd_mkclean(pmd_t pmd) |
| 1351 | { |
| 1352 | if (pmd_large(pmd)) { |
| 1353 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY; |
| 1354 | pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; |
| 1355 | } |
| 1356 | return pmd; |
| 1357 | } |
| 1358 | |
| 1359 | static inline pmd_t pmd_mkdirty(pmd_t pmd) |
| 1360 | { |
| 1361 | if (pmd_large(pmd)) { |
| 1362 | pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY; |
| 1363 | if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) |
| 1364 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; |
| 1365 | } |
| 1366 | return pmd; |
| 1367 | } |
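/*
 * Sketch: as with ptes, a large pmd only becomes hardware-writable once
 * it is both writable and dirty, since only then is
 * _SEGMENT_ENTRY_PROTECT cleared:
 *
 *	pmd = pmd_mkdirty(pmd_mkwrite(pmd));
 */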
| 1368 | |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1369 | static inline pmd_t pmd_mkyoung(pmd_t pmd) |
| 1370 | { |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1371 | if (pmd_large(pmd)) { |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1372 | pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1373 | if (pmd_val(pmd) & _SEGMENT_ENTRY_READ) |
| 1374 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID; |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1375 | } |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1376 | return pmd; |
| 1377 | } |
| 1378 | |
| 1379 | static inline pmd_t pmd_mkold(pmd_t pmd) |
| 1380 | { |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1381 | if (pmd_large(pmd)) { |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1382 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG; |
| 1383 | pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; |
| 1384 | } |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1385 | return pmd; |
| 1386 | } |
| 1387 | |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1388 | static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) |
| 1389 | { |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1390 | if (pmd_large(pmd)) { |
| 1391 | pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE | |
| 1392 | _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG | |
| 1393 | _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT; |
| 1394 | pmd_val(pmd) |= massage_pgprot_pmd(newprot); |
| 1395 | if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) |
| 1396 | pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; |
| 1397 | if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)) |
| 1398 | pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; |
| 1399 | return pmd; |
| 1400 | } |
| 1401 | pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN; |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1402 | pmd_val(pmd) |= massage_pgprot_pmd(newprot); |
| 1403 | return pmd; |
| 1404 | } |
| 1405 | |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1406 | static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1407 | { |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1408 | pmd_t __pmd; |
| 1409 | pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1410 | return __pmd; |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1411 | } |
| 1412 | |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1413 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ |
| 1414 | |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1415 | static inline void __pmdp_csp(pmd_t *pmdp) |
| 1416 | { |
| 1417 | register unsigned long reg2 asm("2") = pmd_val(*pmdp); |
| 1418 | register unsigned long reg3 asm("3") = pmd_val(*pmdp) | |
| 1419 | _SEGMENT_ENTRY_INVALID; |
| 1420 | register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5; |
| 1421 | |
| 1422 | asm volatile( |
| 1423 | " csp %1,%3" |
| 1424 | : "=m" (*pmdp) |
| 1425 | : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc"); |
| 1426 | } |
| 1427 | |
| 1428 | static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp) |
| 1429 | { |
| 1430 | unsigned long sto; |
| 1431 | |
| 1432 | sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t); |
| 1433 | asm volatile( |
| 1434 | " .insn rrf,0xb98e0000,%2,%3,0,0" |
| 1435 | : "=m" (*pmdp) |
| 1436 | : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK)) |
| 1437 | : "cc" ); |
| 1438 | } |
| 1439 | |
| 1440 | static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp) |
| 1441 | { |
| 1442 | unsigned long sto; |
| 1443 | |
| 1444 | sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t); |
| 1445 | asm volatile( |
| 1446 | " .insn rrf,0xb98e0000,%2,%3,0,1" |
| 1447 | : "=m" (*pmdp) |
| 1448 | : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK)) |
| 1449 | : "cc" ); |
| 1450 | } |
| 1451 | |
| 1452 | static inline void pmdp_flush_direct(struct mm_struct *mm, |
| 1453 | unsigned long address, pmd_t *pmdp) |
| 1454 | { |
| 1455 | int active, count; |
| 1456 | |
| 1457 | if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID) |
| 1458 | return; |
| 1459 | if (!MACHINE_HAS_IDTE) { |
| 1460 | __pmdp_csp(pmdp); |
| 1461 | return; |
| 1462 | } |
| 1463 | active = (mm == current->active_mm) ? 1 : 0; |
| 1464 | count = atomic_add_return(0x10000, &mm->context.attach_count); |
| 1465 | if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active && |
| 1466 | cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) |
| 1467 | __pmdp_idte_local(address, pmdp); |
| 1468 | else |
| 1469 | __pmdp_idte(address, pmdp); |
| 1470 | atomic_sub(0x10000, &mm->context.attach_count); |
| 1471 | } |
| 1472 | |
Martin Schwidefsky | 3eabaee | 2013-07-26 15:04:02 +0200 | [diff] [blame] | 1473 | static inline void pmdp_flush_lazy(struct mm_struct *mm, |
| 1474 | unsigned long address, pmd_t *pmdp) |
| 1475 | { |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1476 | int active, count; |
Martin Schwidefsky | 3eabaee | 2013-07-26 15:04:02 +0200 | [diff] [blame] | 1477 | |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1478 | if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID) |
| 1479 | return; |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1480 | active = (mm == current->active_mm) ? 1 : 0; |
| 1481 | count = atomic_add_return(0x10000, &mm->context.attach_count); |
| 1482 | if ((count & 0xffff) <= active) { |
| 1483 | pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID; |
Martin Schwidefsky | 3eabaee | 2013-07-26 15:04:02 +0200 | [diff] [blame] | 1484 | mm->context.flush_mm = 1; |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1485 | } else if (MACHINE_HAS_IDTE) |
| 1486 | __pmdp_idte(address, pmdp); |
| 1487 | else |
| 1488 | __pmdp_csp(pmdp); |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1489 | atomic_sub(0x10000, &mm->context.attach_count); |
Martin Schwidefsky | 3eabaee | 2013-07-26 15:04:02 +0200 | [diff] [blame] | 1490 | } |
| 1491 | |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1492 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 1493 | |
| 1494 | #define __HAVE_ARCH_PGTABLE_DEPOSIT |
Aneesh Kumar K.V | 6b0b50b | 2013-06-05 17:14:02 -0700 | [diff] [blame] | 1495 | extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, |
| 1496 | pgtable_t pgtable); |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1497 | |
| 1498 | #define __HAVE_ARCH_PGTABLE_WITHDRAW |
Aneesh Kumar K.V | 6b0b50b | 2013-06-05 17:14:02 -0700 | [diff] [blame] | 1499 | extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1500 | |
| 1501 | static inline int pmd_trans_splitting(pmd_t pmd) |
| 1502 | { |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1503 | return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) && |
| 1504 | (pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT); |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1505 | } |
| 1506 | |
| 1507 | static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, |
| 1508 | pmd_t *pmdp, pmd_t entry) |
| 1509 | { |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1510 | *pmdp = entry; |
| 1511 | } |
| 1512 | |
| 1513 | static inline pmd_t pmd_mkhuge(pmd_t pmd) |
| 1514 | { |
| 1515 | pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1516 | pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; |
| 1517 | pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1518 | return pmd; |
| 1519 | } |
| 1520 | |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1521 | #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG |
| 1522 | static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, |
| 1523 | unsigned long address, pmd_t *pmdp) |
| 1524 | { |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1525 | pmd_t pmd; |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1526 | |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1527 | pmd = *pmdp; |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1528 | pmdp_flush_direct(vma->vm_mm, address, pmdp); |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1529 | *pmdp = pmd_mkold(pmd); |
| 1530 | return pmd_young(pmd); |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1531 | } |
| 1532 | |
| 1533 | #define __HAVE_ARCH_PMDP_GET_AND_CLEAR |
| 1534 | static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, |
| 1535 | unsigned long address, pmd_t *pmdp) |
| 1536 | { |
| 1537 | pmd_t pmd = *pmdp; |
| 1538 | |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1539 | pmdp_flush_direct(mm, address, pmdp); |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1540 | pmd_clear(pmdp); |
| 1541 | return pmd; |
| 1542 | } |
| 1543 | |
Martin Schwidefsky | fcbe08d6 | 2014-10-24 10:52:29 +0200 | [diff] [blame] | 1544 | #define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL |
| 1545 | static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm, |
| 1546 | unsigned long address, |
| 1547 | pmd_t *pmdp, int full) |
| 1548 | { |
| 1549 | pmd_t pmd = *pmdp; |
| 1550 | |
| 1551 | if (!full) |
| 1552 | pmdp_flush_lazy(mm, address, pmdp); |
| 1553 | pmd_clear(pmdp); |
| 1554 | return pmd; |
| 1555 | } |
| 1556 | |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1557 | #define __HAVE_ARCH_PMDP_CLEAR_FLUSH |
| 1558 | static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma, |
| 1559 | unsigned long address, pmd_t *pmdp) |
| 1560 | { |
| 1561 | return pmdp_get_and_clear(vma->vm_mm, address, pmdp); |
| 1562 | } |
| 1563 | |
| 1564 | #define __HAVE_ARCH_PMDP_INVALIDATE |
| 1565 | static inline void pmdp_invalidate(struct vm_area_struct *vma, |
| 1566 | unsigned long address, pmd_t *pmdp) |
| 1567 | { |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1568 | pmdp_flush_direct(vma->vm_mm, address, pmdp); |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1569 | } |
| 1570 | |
Gerald Schaefer | be32865 | 2013-01-21 16:48:07 +0100 | [diff] [blame] | 1571 | #define __HAVE_ARCH_PMDP_SET_WRPROTECT |
| 1572 | static inline void pmdp_set_wrprotect(struct mm_struct *mm, |
| 1573 | unsigned long address, pmd_t *pmdp) |
| 1574 | { |
| 1575 | pmd_t pmd = *pmdp; |
| 1576 | |
| 1577 | if (pmd_write(pmd)) { |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1578 | pmdp_flush_direct(mm, address, pmdp); |
Gerald Schaefer | be32865 | 2013-01-21 16:48:07 +0100 | [diff] [blame] | 1579 | set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd)); |
| 1580 | } |
| 1581 | } |
| 1582 | |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1583 | #define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot)) |
| 1584 | #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) |
| 1585 | |
| 1586 | static inline int pmd_trans_huge(pmd_t pmd) |
| 1587 | { |
| 1588 | return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE; |
| 1589 | } |
| 1590 | |
| 1591 | static inline int has_transparent_hugepage(void) |
| 1592 | { |
| 1593 | return MACHINE_HAS_HPAGE ? 1 : 0; |
| 1594 | } |
Gerald Schaefer | 75077af | 2012-10-08 16:30:15 -0700 | [diff] [blame] | 1595 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
| 1596 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1597 | /* |
| 1598 | * 31 bit swap entry format: |
| 1599 | * A page-table entry has some bits we have to treat in a special way. |
| 1600 | * Bits 0, 20 and 23 have to be zero, otherwise a specification
| 1601 | * exception will occur instead of a page translation exception. The
| 1602 | * specification exception has the bad habit of not storing the
| 1603 | * necessary information in the lowcore.
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 1604 | * Bits 21, 22, 30 and 31 are used to indicate the page type. |
| 1605 | * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1606 | * This leaves the bits 1-19 and bits 24-29 to store type and offset. |
| 1607 | * We use the 5 bits from 25-29 for the type and the 19 bits from 1-19
| 1608 | * plus bit 24 for the offset, 20 offset bits in total.
| 1609 | * 0| offset |0110|o|type |00| |
| 1610 | * 0 0000000001111111111 2222 2 22222 33 |
| 1611 | * 0 1234567890123456789 0123 4 56789 01 |
| 1612 | * |
| 1613 | * 64 bit swap entry format: |
| 1614 | * A page-table entry has some bits we have to treat in a special way. |
| 1615 | * Bits 52 and 55 have to be zero, otherwise a specification
| 1616 | * exception will occur instead of a page translation exception. The
| 1617 | * specification exception has the bad habit of not storing the
| 1618 | * necessary information in the lowcore.
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 1619 | * Bits 53, 54, 62 and 63 are used to indicate the page type. |
| 1620 | * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1621 | * This leaves the bits 0-51 and bits 56-61 to store type and offset. |
| 1622 | * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
| 1623 | * plus bit 56 for the offset, 53 offset bits in total.
| 1624 | * | offset |0110|o|type |00| |
| 1625 | * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 |
| 1626 | * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 |
| 1627 | */ |
Heiko Carstens | 5a79859a | 2015-02-12 13:08:27 +0100 | [diff] [blame] | 1628 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1629 | #define __SWP_OFFSET_MASK (~0UL >> 11) |
Heiko Carstens | 5a79859a | 2015-02-12 13:08:27 +0100 | [diff] [blame] | 1630 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 1631 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1632 | { |
| 1633 | pte_t pte; |
| 1634 | offset &= __SWP_OFFSET_MASK; |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 1635 | pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1636 | ((offset & 1UL) << 7) | ((offset & ~1UL) << 11); |
| 1637 | return pte; |
| 1638 | } |
| 1639 | |
| 1640 | #define __swp_type(entry) (((entry).val >> 2) & 0x1f) |
| 1641 | #define __swp_offset(entry) (((entry).val >> 11) | (((entry).val >> 7) & 1)) |
| 1642 | #define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) }) |
| 1643 | |
| 1644 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) |
| 1645 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) |
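/*
 * Worked example for the 64 bit layout (values assumed): encoding
 * type 5, offset 0x2d with mk_swap_pte() yields
 *
 *	_PAGE_INVALID | _PAGE_TYPE		(swap pte marker)
 *	| (5 & 0x1f) << 2			(five type bits)
 *	| (0x2d & 1UL) << 7			(low offset bit)
 *	| (0x2d & ~1UL) << 11			(remaining offset bits)
 *
 * __swp_type() and __swp_offset() recover 5 and 0x2d from this value.
 */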
| 1646 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1647 | #endif /* !__ASSEMBLY__ */ |
| 1648 | |
| 1649 | #define kern_addr_valid(addr) (1) |
| 1650 | |
Heiko Carstens | 17f3458 | 2008-04-30 13:38:47 +0200 | [diff] [blame] | 1651 | extern int vmem_add_mapping(unsigned long start, unsigned long size); |
| 1652 | extern int vmem_remove_mapping(unsigned long start, unsigned long size); |
Carsten Otte | 402b086 | 2008-03-25 18:47:10 +0100 | [diff] [blame] | 1653 | extern int s390_enable_sie(void); |
Dominik Dingel | 3ac8e38 | 2014-10-23 12:09:17 +0200 | [diff] [blame] | 1654 | extern int s390_enable_skey(void); |
Dominik Dingel | a13cff3 | 2014-10-23 12:07:14 +0200 | [diff] [blame] | 1655 | extern void s390_reset_cmma(struct mm_struct *mm); |
Heiko Carstens | f4eb07c | 2006-12-08 15:56:07 +0100 | [diff] [blame] | 1656 | |
Martin Schwidefsky | 1f6b83e | 2015-01-14 17:51:17 +0100 | [diff] [blame] | 1657 | /* s390 has a private copy of get_unmapped_area() to deal with cache synonyms */
| 1658 | #define HAVE_ARCH_UNMAPPED_AREA |
| 1659 | #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN |
| 1660 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1661 | /* |
| 1662 | * No page table caches to initialise |
| 1663 | */ |
Heiko Carstens | 765a0ca | 2013-03-23 10:29:01 +0100 | [diff] [blame] | 1664 | static inline void pgtable_cache_init(void) { } |
| 1665 | static inline void check_pgt_cache(void) { } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1666 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1667 | #include <asm-generic/pgtable.h> |
| 1668 | |
| 1669 | #endif /* _ASM_S390_PGTABLE_H */