/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

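/*
 * The mask exists because there can be more than one zero page, kept in
 * a contiguous zeroed block starting at empty_zero_page. As an
 * illustrative example (values not taken from any particular machine):
 * with 4KB pages and a zero_page_mask of 0x3000 there would be four
 * copies, and ZERO_PAGE(vaddr) picks the copy whose offset within the
 * block matches vaddr, so reads of the zero page land in the same
 * cache set/color as the mapping they back.
 */
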
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
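
/*
 * With the shifts above, on 64 bit each pmd entry thus maps
 * 2^20 = 1MB, each pud entry 2^31 = 2GB and each pgd entry
 * 2^42 = 4TB of virtual address space.
 */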
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 83 | |
| 84 | /* |
| 85 | * entries per page directory level: the S390 is two-level, so |
| 86 | * we don't really have any PMD directory physically. |
| 87 | * for S390 segment-table entries are combined to one PGD |
| 88 | * that leads to 1024 pte per pgd |
| 89 | */ |
Martin Schwidefsky | 146e4b3 | 2008-02-09 18:24:35 +0100 | [diff] [blame] | 90 | #define PTRS_PER_PTE 256 |
Heiko Carstens | f4815ac | 2012-05-23 16:24:51 +0200 | [diff] [blame] | 91 | #ifndef CONFIG_64BIT |
Martin Schwidefsky | 146e4b3 | 2008-02-09 18:24:35 +0100 | [diff] [blame] | 92 | #define PTRS_PER_PMD 1 |
Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 93 | #define PTRS_PER_PUD 1 |
Heiko Carstens | f4815ac | 2012-05-23 16:24:51 +0200 | [diff] [blame] | 94 | #else /* CONFIG_64BIT */ |
Martin Schwidefsky | 146e4b3 | 2008-02-09 18:24:35 +0100 | [diff] [blame] | 95 | #define PTRS_PER_PMD 2048 |
Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 96 | #define PTRS_PER_PUD 2048 |
Heiko Carstens | f4815ac | 2012-05-23 16:24:51 +0200 | [diff] [blame] | 97 | #endif /* CONFIG_64BIT */ |
Martin Schwidefsky | 146e4b3 | 2008-02-09 18:24:35 +0100 | [diff] [blame] | 98 | #define PTRS_PER_PGD 2048 |
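
/*
 * Taken together: on 64 bit a fully populated tree maps
 * 2048 * 2048 * 2048 * 256 * 4KB = 2^53 bytes (8 PB) of virtual
 * address space; on 31 bit the folded levels give
 * 2048 * 256 * 4KB = 2^31 bytes (2 GB).
 */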

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
#endif

/*
 * A 31 bit page table entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit page table entry of S390 has the following format:
 * |                         PFRA                       |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SWC	0x004		/* SW pte changed bit */
#define _PAGE_SWR	0x008		/* SW pte referenced bit */
#define _PAGE_SWW	0x010		/* SW pte write bit */
#define _PAGE_SPECIAL	0x020		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
				 _PAGE_SWC | _PAGE_SWR)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
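
/*
 * The type values are combinations of the bits defined above, e.g.
 * _PAGE_TYPE_NONE = _PAGE_INVALID | _PAGE_SWT             (0x401),
 * _PAGE_TYPE_SWAP = _PAGE_INVALID | _PAGE_SWX | _PAGE_SWT (0x403),
 * _PAGE_TYPE_FILE = _PAGE_INVALID | _PAGE_RO | _PAGE_SWT  (0x601).
 */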

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */
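
/*
 * The columns in the table above are the bits i (_PAGE_INVALID),
 * r (_PAGE_RO), x (_PAGE_SWX) and t (_PAGE_SWT). For example the swap
 * type 1011 is _PAGE_INVALID | _PAGE_SWX | _PAGE_SWT = 0x403, and ipte
 * turns the valid read-only combination 0100 (0x200) into 1100 (0x600),
 * which differs from all four of the invalid pte types listed above.
 */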

#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf0000000UL
#define RCP_FP_BIT	0x08000000UL
#define RCP_PCL_BIT	0x00800000UL
#define RCP_HR_BIT	0x00400000UL
#define RCP_HC_BIT	0x00200000UL
#define RCP_GR_BIT	0x00040000UL
#define RCP_GC_BIT	0x00020000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x00008000UL
#define KVM_UC_BIT	0x00004000UL

#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_RO	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */
#define _REGION3_ENTRY_CO	0x100	/* change-recording override	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */
#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number	    */
#define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)

/* Set of bits not changed in pmd_modify */
#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf000000000000000UL
#define RCP_FP_BIT	0x0800000000000000UL
#define RCP_PCL_BIT	0x0080000000000000UL
#define RCP_HR_BIT	0x0040000000000000UL
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x0000800000000000UL
#define KVM_UC_BIT	0x0000400000000000UL

#endif /* CONFIG_64BIT */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
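
/*
 * A complete ASCE combines a table origin with the bits above. As a
 * sketch (mirroring how the mm context code elsewhere builds it, not a
 * definition made in this file): a 64 bit user process starting with a
 * three-level tree would run with
 *
 *	asce = __pa(mm->pgd) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH |
 *	       _ASCE_USER_BITS;
 *
 * whereas a kernel ASCE simply omits _ASCE_USER_BITS.
 */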

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
#define PAGE_RWC	__pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)

#define PAGE_KERNEL	PAGE_RWC
#define PAGE_SHARED	PAGE_KERNEL
#define PAGE_COPY	PAGE_RO

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RO
#define __P101	PAGE_RO
#define __P110	PAGE_RO
#define __P111	PAGE_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RO
#define __S101	PAGE_RO
#define __S110	PAGE_RW
#define __S111	PAGE_RW
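
/*
 * The xwr index encodes the mmap protection bits. Private mappings
 * (__Pxxx) stay read-only even when PROT_WRITE was requested, so the
 * first store faults and can be handled as copy-on-write; shared
 * writable mappings (__S010, __S011, ...) get PAGE_RW directly. Note
 * that PAGE_RW still carries the hardware read-only bit: _PAGE_SWW
 * only records writability until the first write marks the pte dirty
 * (see pte_mkdirty below).
 */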

static inline int mm_exclusive(struct mm_struct *mm)
{
	return likely(mm == current->active_mm &&
		      atomic_read(&mm->context.attach_count) <= 1);
}

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_large(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }

#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */

static inline int pmd_present(pmd_t pmd)
{
	unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
	return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
	       !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
	       !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
}
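
/*
 * _HPAGE_TYPE_NONE is _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO (0x220):
 * a PROT_NONE huge page is invalid for the hardware but still counts
 * as present, while pmd_none only matches entries that have the
 * invalid bit set without the protection bit.
 */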

static inline int pmd_large(pmd_t pmd)
{
#ifdef CONFIG_64BIT
	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
#else
	return 0;
#endif
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return 0;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear RCP_PCL_BIT in old */
		"	oihh	%1,0x0080\n"	/* set RCP_PCL_BIT in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc");
#endif
	return __pgste(new);
}
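
/*
 * The pgste for a pte lives one page table length behind the pte
 * itself, hence the ptep[PTRS_PER_PTE] addressing. In C terms the csg
 * loop above is roughly (sketch only):
 *
 *	cur = *pgste_ptr;
 *	do {
 *		old = cur & ~RCP_PCL_BIT;	// expect the lock bit clear
 *		new = old | RCP_PCL_BIT;	// and try to set it
 *		cur = cmpxchg(pgste_ptr, old, new);
 *	} while (cur != old);
 *
 * i.e. spin until RCP_PCL_BIT could be set atomically;
 * pgste_set_unlock below stores the pgste back with the bit cleared.
 */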

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
	preempt_enable();
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits;
	unsigned char skey;

	if (!pte_present(*ptep))
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Clear page changed & referenced bit in the storage key */
	if (bits & _PAGE_CHANGED)
		page_set_storage_key(address, skey ^ bits, 0);
	else if (bits)
		page_reset_referenced(address);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
	/* Get host changed & referenced bits from pgste */
	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
	/* Transfer page changed & referenced bit to kvm user bits */
	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
	/* Clear relevant host bits in pgste. */
	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) |=
		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* Transfer referenced bit to pte */
	pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
#endif
	return pgste;
}
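
/*
 * The magic shift counts line up the storage key bits (_PAGE_REFERENCED
 * 0x04 and _PAGE_CHANGED 0x02, from <asm/page.h>) with the pgste and
 * pte bit positions: 0x04 << 48 == RCP_GR_BIT, 0x02 << 48 == RCP_GC_BIT,
 * 0x04 << 45 == KVM_UR_BIT, 0x02 << 45 == KVM_UC_BIT, and
 * (RCP_HR_BIT | RCP_HC_BIT) >> 52 maps the host bits back onto
 * 0x04/0x02. Likewise 0x04 << 1 == 0x08 == _PAGE_SWR for the final
 * transfer of the referenced bit into the pte.
 */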

static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	int young;

	if (!pte_present(*ptep))
		return pgste;
	/* Get referenced bit from storage key */
	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
	if (young)
		pgste_val(pgste) |= RCP_GR_BIT;
	/* Get host referenced bit from pgste */
	if (pgste_val(pgste) & RCP_HR_BIT) {
		pgste_val(pgste) &= ~RCP_HR_BIT;
		young = 1;
	}
	/* Transfer referenced bit to kvm user bits and pte */
	if (young) {
		pgste_val(pgste) |= KVM_UR_BIT;
		pte_val(*ptep) |= _PAGE_SWR;
	}
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long okey, nkey;

	if (!pte_present(entry))
		return;
	address = pte_val(entry) & PAGE_MASK;
	okey = nkey = page_get_storage_key(address);
	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
	/* Set page access key and fetch protection bit from pgste */
	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
	if (okey != nkey)
		page_set_storage_key(address, nkey, 0);
#endif
}

static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
		/*
		 * Without enhanced suppression-on-protection force
		 * the dirty bit on for all writable ptes.
		 */
		pte_val(entry) |= _PAGE_SWC;
		pte_val(entry) &= ~_PAGE_RO;
	}
	*ptep = entry;
}

/**
 * struct gmap - guest address space
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	unsigned long *table;
	unsigned long asce;
	struct list_head crst_list;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @list: list head, links to the next gmap_rmap structure in the list
 * @entry: pointer to a segment table entry
 */
struct gmap_rmap {
	struct list_head list;
	unsigned long *entry;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
	unsigned long vmaddr;
	struct list_head mapper;
};

struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
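
/*
 * A plausible life cycle of a gmap user (a sketch only; "vmaddr",
 * "guest_addr" and "size" are placeholders for a range that is mapped
 * in the parent mm):
 *
 *	struct gmap *gmap = gmap_alloc(current->mm);
 *	gmap_map_segment(gmap, vmaddr, guest_addr, size);
 *	gmap_enable(gmap);	// switch to the guest address space
 *	...			// run the guest, resolving gmap_fault()s
 *	gmap_disable(gmap);
 *	gmap_unmap_segment(gmap, guest_addr, size);
 *	gmap_free(gmap);
 */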

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_set_key(ptep, pgste, entry);
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
			pte_val(entry) |= _PAGE_CO;
		*ptep = entry;
	}
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SWW) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SWC) != 0;
}

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWR)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWW;
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWW;
	if (pte_val(pte) & _PAGE_SWC)
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWC;
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWC;
	if (pte_val(pte) & _PAGE_SWW)
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}
| 887 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 888 | static inline pte_t pte_mkold(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 889 | { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 890 | #ifdef CONFIG_PGSTE |
| 891 | pte_val(pte) &= ~_PAGE_SWR; |
| 892 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 893 | return pte; |
| 894 | } |
| 895 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 896 | static inline pte_t pte_mkyoung(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 897 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 898 | return pte; |
| 899 | } |
| 900 | |
Nick Piggin | 7e67513 | 2008-04-28 02:13:00 -0700 | [diff] [blame] | 901 | static inline pte_t pte_mkspecial(pte_t pte) |
| 902 | { |
Nick Piggin | a08cb62 | 2008-04-28 02:13:03 -0700 | [diff] [blame] | 903 | pte_val(pte) |= _PAGE_SPECIAL; |
Nick Piggin | 7e67513 | 2008-04-28 02:13:00 -0700 | [diff] [blame] | 904 | return pte; |
| 905 | } |
| 906 | |
Heiko Carstens | 84afdce | 2010-10-25 16:10:36 +0200 | [diff] [blame] | 907 | #ifdef CONFIG_HUGETLB_PAGE |
| 908 | static inline pte_t pte_mkhuge(pte_t pte) |
| 909 | { |
| 910 | /* |
| 911 | * PROT_NONE needs to be remapped from the pte type to the ste type. |
| 912 | * The HW invalid bit is also different for pte and ste. The pte |
| 913 | * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE |
| 914 | * bit, so we don't have to clear it. |
| 915 | */ |
| 916 | if (pte_val(pte) & _PAGE_INVALID) { |
| 917 | if (pte_val(pte) & _PAGE_SWT) |
| 918 | pte_val(pte) |= _HPAGE_TYPE_NONE; |
| 919 | pte_val(pte) |= _SEGMENT_ENTRY_INV; |
| 920 | } |
| 921 | /* |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 922 |  * Clear the SW pte bits; there are no SW bits in a segment table entry.
Heiko Carstens | 84afdce | 2010-10-25 16:10:36 +0200 | [diff] [blame] | 923 | */ |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 924 | pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC | |
| 925 | _PAGE_SWR | _PAGE_SWW); |
Heiko Carstens | 84afdce | 2010-10-25 16:10:36 +0200 | [diff] [blame] | 926 | /* |
| 927 | * Also set the change-override bit because we don't need dirty bit |
| 928 | * tracking for hugetlbfs pages. |
| 929 | */ |
| 930 | pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO); |
| 931 | return pte; |
| 932 | } |
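
/*
 * Hedged usage sketch, example only: a hugetlb fault handler would
 * build a huge pte roughly like this before storing it into the
 * segment-table slot; page, vma, mm, address and pmdp are assumed to
 * come from the surrounding fault code.
 */
#if 0
	pte = pte_mkhuge(mk_pte(page, vma->vm_page_prot));
	set_huge_pte_at(mm, address & HPAGE_MASK, (pte_t *) pmdp, pte);
#endif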
| 933 | #endif |
| 934 | |
Florian Funke | 15e86b0 | 2008-10-10 21:33:26 +0200 | [diff] [blame] | 935 | /* |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 936 | * Get (and clear) the user dirty bit for a pte. |
Florian Funke | 15e86b0 | 2008-10-10 21:33:26 +0200 | [diff] [blame] | 937 | */ |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 938 | static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm, |
| 939 | pte_t *ptep) |
Florian Funke | 15e86b0 | 2008-10-10 21:33:26 +0200 | [diff] [blame] | 940 | { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 941 | pgste_t pgste; |
| 942 | int dirty = 0; |
Florian Funke | 15e86b0 | 2008-10-10 21:33:26 +0200 | [diff] [blame] | 943 | |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 944 | if (mm_has_pgste(mm)) { |
| 945 | pgste = pgste_get_lock(ptep); |
| 946 | pgste = pgste_update_all(ptep, pgste); |
| 947 | dirty = !!(pgste_val(pgste) & KVM_UC_BIT); |
| 948 | pgste_val(pgste) &= ~KVM_UC_BIT; |
| 949 | pgste_set_unlock(ptep, pgste); |
| 950 | return dirty; |
Florian Funke | 15e86b0 | 2008-10-10 21:33:26 +0200 | [diff] [blame] | 951 | } |
Florian Funke | 15e86b0 | 2008-10-10 21:33:26 +0200 | [diff] [blame] | 952 | return dirty; |
| 953 | } |
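
/*
 * Sketch only: a dirty-log harvester (e.g. on the KVM side) could use
 * the helper above like this; mm, ptep, kvm and gfn are assumed from
 * the caller, and mark_page_dirty() is the assumed consumer.
 */
#if 0
	if (ptep_test_and_clear_user_dirty(mm, ptep))
		mark_page_dirty(kvm, gfn);
#endif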
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 954 | |
| 955 | /* |
| 956 | * Get (and clear) the user referenced bit for a pte. |
| 957 | */ |
| 958 | static inline int ptep_test_and_clear_user_young(struct mm_struct *mm, |
| 959 | pte_t *ptep) |
| 960 | { |
| 961 | pgste_t pgste; |
| 962 | int young = 0; |
| 963 | |
| 964 | if (mm_has_pgste(mm)) { |
| 965 | pgste = pgste_get_lock(ptep); |
| 966 | pgste = pgste_update_young(ptep, pgste); |
| 967 | young = !!(pgste_val(pgste) & KVM_UR_BIT); |
| 968 | pgste_val(pgste) &= ~KVM_UR_BIT; |
| 969 | pgste_set_unlock(ptep, pgste); |
| 970 | } |
| 971 | return young; |
| 972 | } |
Florian Funke | 15e86b0 | 2008-10-10 21:33:26 +0200 | [diff] [blame] | 973 | |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 974 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
| 975 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, |
| 976 | unsigned long addr, pte_t *ptep) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 977 | { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 978 | pgste_t pgste; |
| 979 | pte_t pte; |
Christian Borntraeger | 5b7baf0 | 2008-03-25 18:47:12 +0100 | [diff] [blame] | 980 | |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 981 | if (mm_has_pgste(vma->vm_mm)) { |
| 982 | pgste = pgste_get_lock(ptep); |
| 983 | pgste = pgste_update_young(ptep, pgste); |
| 984 | pte = *ptep; |
| 985 | *ptep = pte_mkold(pte); |
| 986 | pgste_set_unlock(ptep, pgste); |
| 987 | return pte_young(pte); |
| 988 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 989 | return 0; |
| 990 | } |
| 991 | |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 992 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH |
| 993 | static inline int ptep_clear_flush_young(struct vm_area_struct *vma, |
| 994 | unsigned long address, pte_t *ptep) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 995 | { |
Christian Borntraeger | 5b7baf0 | 2008-03-25 18:47:12 +0100 | [diff] [blame] | 996 | /* No need to flush the TLB:
 | 997 |  * on s390 the reference bits are kept in the storage key, never in
 | 998 |  * the TLB. With virtualization we handle the reference bit; without
 | 999 |  * it we can simply return. */
Christian Borntraeger | 5b7baf0 | 2008-03-25 18:47:12 +0100 | [diff] [blame] | 1000 | return ptep_test_and_clear_young(vma, address, ptep); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1001 | } |
| 1002 | |
Gerald Schaefer | 9282ed9 | 2006-09-20 15:59:37 +0200 | [diff] [blame] | 1003 | static inline void __ptep_ipte(unsigned long address, pte_t *ptep) |
| 1004 | { |
| 1005 | if (!(pte_val(*ptep) & _PAGE_INVALID)) { |
Heiko Carstens | f4815ac | 2012-05-23 16:24:51 +0200 | [diff] [blame] | 1006 | #ifndef CONFIG_64BIT |
Martin Schwidefsky | 146e4b3 | 2008-02-09 18:24:35 +0100 | [diff] [blame] | 1007 | /* pto must point to the start of the segment table */ |
Gerald Schaefer | 9282ed9 | 2006-09-20 15:59:37 +0200 | [diff] [blame] | 1008 | pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); |
| 1009 | #else |
| 1010 | /* ipte in zarch mode can do the math */ |
| 1011 | pte_t *pto = ptep; |
| 1012 | #endif |
Martin Schwidefsky | 94c12cc | 2006-09-28 16:56:43 +0200 | [diff] [blame] | 1013 | asm volatile( |
| 1014 | " ipte %2,%3" |
| 1015 | : "=m" (*ptep) : "m" (*ptep), |
| 1016 | "a" (pto), "a" (address)); |
Gerald Schaefer | 9282ed9 | 2006-09-20 15:59:37 +0200 | [diff] [blame] | 1017 | } |
Gerald Schaefer | 9282ed9 | 2006-09-20 15:59:37 +0200 | [diff] [blame] | 1018 | } |
| 1019 | |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1020 | /* |
| 1021 | * This is hard to understand. ptep_get_and_clear and ptep_clear_flush |
| 1022 | * both clear the TLB for the unmapped pte. The reason is that |
| 1023 | * ptep_get_and_clear is used in common code (e.g. change_pte_range) |
| 1024 | * to modify an active pte. The sequence is |
| 1025 | * 1) ptep_get_and_clear |
| 1026 | * 2) set_pte_at |
| 1027 | * 3) flush_tlb_range |
 | 1028 |  * On s390 the tlb needs to be flushed together with the modification
 | 1029 |  * of the pte if the pte is active. The only way this can be done is to
 | 1030 |  * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 | 1031 |  * is a nop (see the sketch after ptep_get_and_clear below).
| 1032 | */ |
| 1033 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1034 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, |
| 1035 | unsigned long address, pte_t *ptep) |
| 1036 | { |
| 1037 | pgste_t pgste; |
| 1038 | pte_t pte; |
| 1039 | |
| 1040 | mm->context.flush_mm = 1; |
| 1041 | if (mm_has_pgste(mm)) |
| 1042 | pgste = pgste_get_lock(ptep); |
| 1043 | |
| 1044 | pte = *ptep; |
| 1045 | if (!mm_exclusive(mm)) |
| 1046 | __ptep_ipte(address, ptep); |
| 1047 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; |
| 1048 | |
| 1049 | if (mm_has_pgste(mm)) { |
| 1050 | pgste = pgste_update_all(&pte, pgste); |
| 1051 | pgste_set_unlock(ptep, pgste); |
| 1052 | } |
| 1053 | return pte; |
| 1054 | } |
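
/*
 * The sequence from the comment above as common code would issue it;
 * sketch only, with mm/vma/addr/ptep/newprot assumed from the caller.
 * Step 1 already flushes the TLB on s390, so step 3 is a nop here.
 */
#if 0
	pte = ptep_get_and_clear(mm, addr, ptep);		/* 1) */
	set_pte_at(mm, addr, ptep, pte_modify(pte, newprot));	/* 2) */
	flush_tlb_range(vma, start, end);			/* 3) nop */
#endif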
| 1055 | |
| 1056 | #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION |
| 1057 | static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, |
| 1058 | unsigned long address, |
| 1059 | pte_t *ptep) |
| 1060 | { |
| 1061 | pte_t pte; |
| 1062 | |
| 1063 | mm->context.flush_mm = 1; |
| 1064 | if (mm_has_pgste(mm)) |
| 1065 | pgste_get_lock(ptep); |
| 1066 | |
| 1067 | pte = *ptep; |
| 1068 | if (!mm_exclusive(mm)) |
| 1069 | __ptep_ipte(address, ptep); |
| 1070 | return pte; |
| 1071 | } |
| 1072 | |
| 1073 | static inline void ptep_modify_prot_commit(struct mm_struct *mm, |
| 1074 | unsigned long address, |
| 1075 | pte_t *ptep, pte_t pte) |
| 1076 | { |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1077 | if (mm_has_pgste(mm)) { |
| 1078 | pgste_set_pte(ptep, pte); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1079 | pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE)); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1080 | } else |
| 1081 | *ptep = pte; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1082 | } |
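
/*
 * Sketch only: the modify_prot transaction as generic code uses it.
 * Between start and commit the pte is invalid and, with pgstes, the
 * pgste lock is held.
 */
#if 0
	pte = ptep_modify_prot_start(mm, addr, ptep);
	pte = pte_modify(pte, newprot);
	ptep_modify_prot_commit(mm, addr, ptep, pte);
#endif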
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1083 | |
| 1084 | #define __HAVE_ARCH_PTEP_CLEAR_FLUSH |
Martin Schwidefsky | f0e47c2 | 2007-07-17 04:03:03 -0700 | [diff] [blame] | 1085 | static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, |
| 1086 | unsigned long address, pte_t *ptep) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1087 | { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1088 | pgste_t pgste; |
| 1089 | pte_t pte; |
| 1090 | |
| 1091 | if (mm_has_pgste(vma->vm_mm)) |
| 1092 | pgste = pgste_get_lock(ptep); |
| 1093 | |
| 1094 | pte = *ptep; |
| 1095 | __ptep_ipte(address, ptep); |
| 1096 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; |
| 1097 | |
| 1098 | if (mm_has_pgste(vma->vm_mm)) { |
| 1099 | pgste = pgste_update_all(&pte, pgste); |
| 1100 | pgste_set_unlock(ptep, pgste); |
| 1101 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1102 | return pte; |
| 1103 | } |
| 1104 | |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1105 | /* |
| 1106 | * The batched pte unmap code uses ptep_get_and_clear_full to clear the |
| 1107 | * ptes. Here an optimization is possible. tlb_gather_mmu flushes all |
| 1108 | * tlbs of an mm if it can guarantee that the ptes of the mm_struct |
| 1109 | * cannot be accessed while the batched unmap is running. In this case |
| 1110 | * full==1 and a simple pte_clear is enough. See tlb.h. |
| 1111 | */ |
| 1112 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL |
| 1113 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1114 | unsigned long address, |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1115 | pte_t *ptep, int full) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1116 | { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1117 | pgste_t pgste; |
| 1118 | pte_t pte; |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1119 | |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1120 | if (mm_has_pgste(mm)) |
| 1121 | pgste = pgste_get_lock(ptep); |
| 1122 | |
| 1123 | pte = *ptep; |
| 1124 | if (!full) |
| 1125 | __ptep_ipte(address, ptep); |
| 1126 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; |
| 1127 | |
| 1128 | if (mm_has_pgste(mm)) { |
| 1129 | pgste = pgste_update_all(&pte, pgste); |
| 1130 | pgste_set_unlock(ptep, pgste); |
| 1131 | } |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1132 | return pte; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1133 | } |
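
/*
 * Sketch only: the batched unmap path passes tlb->fullmm as "full" so
 * that the ipte is skipped when the whole address space goes away.
 */
#if 0
	pte = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
#endif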
| 1134 | |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1135 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1136 | static inline pte_t ptep_set_wrprotect(struct mm_struct *mm, |
| 1137 | unsigned long address, pte_t *ptep) |
| 1138 | { |
| 1139 | pgste_t pgste; |
| 1140 | pte_t pte = *ptep; |
| 1141 | |
| 1142 | if (pte_write(pte)) { |
| 1143 | mm->context.flush_mm = 1; |
| 1144 | if (mm_has_pgste(mm)) |
| 1145 | pgste = pgste_get_lock(ptep); |
| 1146 | |
| 1147 | if (!mm_exclusive(mm)) |
| 1148 | __ptep_ipte(address, ptep); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1149 | pte = pte_wrprotect(pte); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1150 | |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1151 | if (mm_has_pgste(mm)) { |
| 1152 | pgste_set_pte(ptep, pte); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1153 | pgste_set_unlock(ptep, pgste); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1154 | } else |
| 1155 | *ptep = pte; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1156 | } |
| 1157 | return pte; |
| 1158 | } |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1159 | |
| 1160 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1161 | static inline int ptep_set_access_flags(struct vm_area_struct *vma, |
| 1162 | unsigned long address, pte_t *ptep, |
| 1163 | pte_t entry, int dirty) |
| 1164 | { |
| 1165 | pgste_t pgste; |
| 1166 | |
| 1167 | if (pte_same(*ptep, entry)) |
| 1168 | return 0; |
| 1169 | if (mm_has_pgste(vma->vm_mm)) |
| 1170 | pgste = pgste_get_lock(ptep); |
| 1171 | |
| 1172 | __ptep_ipte(address, ptep); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1173 | |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1174 | if (mm_has_pgste(vma->vm_mm)) { |
| 1175 | pgste_set_pte(ptep, entry); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1176 | pgste_set_unlock(ptep, pgste); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1177 | } else |
| 1178 | *ptep = entry; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1179 | return 1; |
| 1180 | } |
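
/*
 * Sketch only: the generic fault path treats this as "flush and update
 * if the pte changed":
 */
#if 0
	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
		update_mmu_cache(vma, address, ptep);	/* nop on s390 */
#endif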
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1181 | |
| 1182 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1183 | * Conversion functions: convert a page and protection to a page entry, |
| 1184 | * and a page entry and page directory to the page they refer to. |
| 1185 | */ |
| 1186 | static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) |
| 1187 | { |
| 1188 | pte_t __pte; |
| 1189 | pte_val(__pte) = physpage + pgprot_val(pgprot); |
| 1190 | return __pte; |
| 1191 | } |
| 1192 | |
Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 1193 | static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) |
| 1194 | { |
Heiko Carstens | 0b2b6e1d | 2006-10-04 20:02:23 +0200 | [diff] [blame] | 1195 | unsigned long physpage = page_to_phys(page); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1196 | pte_t __pte = mk_pte_phys(physpage, pgprot); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1197 | |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1198 | if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) { |
| 1199 | pte_val(__pte) |= _PAGE_SWC; |
| 1200 | pte_val(__pte) &= ~_PAGE_RO; |
| 1201 | } |
| 1202 | return __pte; |
Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 1203 | } |
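
/*
 * Sketch only: establishing a mapping with the conversion functions
 * above; page, vma, address and ptep are assumed from the caller.
 */
#if 0
	set_pte_at(vma->vm_mm, address, ptep, mk_pte(page, vma->vm_page_prot));
#endif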
| 1204 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1205 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1206 | #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) |
| 1207 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) |
| 1208 | #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1209 | |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1210 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1211 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) |
| 1212 | |
Heiko Carstens | f4815ac | 2012-05-23 16:24:51 +0200 | [diff] [blame] | 1213 | #ifndef CONFIG_64BIT |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1214 | |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1215 | #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) |
 | 1216 | #define pud_deref(pud) ({ BUG(); 0UL; })
 | 1217 | #define pgd_deref(pgd) ({ BUG(); 0UL; })
| 1218 | |
| 1219 | #define pud_offset(pgd, address) ((pud_t *) pgd) |
| 1220 | #define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1221 | |
Heiko Carstens | f4815ac | 2012-05-23 16:24:51 +0200 | [diff] [blame] | 1222 | #else /* CONFIG_64BIT */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1223 | |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1224 | #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) |
| 1225 | #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) |
Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 1226 | #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1227 | |
Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 1228 | static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) |
| 1229 | { |
Martin Schwidefsky | 6252d70 | 2008-02-09 18:24:37 +0100 | [diff] [blame] | 1230 | pud_t *pud = (pud_t *) pgd; |
| 1231 | if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) |
| 1232 | pud = (pud_t *) pgd_deref(*pgd); |
Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 1233 | return pud + pud_index(address); |
| 1234 | } |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1235 | |
| 1236 | static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) |
| 1237 | { |
Martin Schwidefsky | 6252d70 | 2008-02-09 18:24:37 +0100 | [diff] [blame] | 1238 | pmd_t *pmd = (pmd_t *) pud; |
| 1239 | if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) |
| 1240 | pmd = (pmd_t *) pud_deref(*pud); |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1241 | return pmd + pmd_index(address); |
| 1242 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1243 | |
Heiko Carstens | f4815ac | 2012-05-23 16:24:51 +0200 | [diff] [blame] | 1244 | #endif /* CONFIG_64BIT */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1245 | |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1246 | #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) |
| 1247 | #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) |
| 1248 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
| 1249 | |
| 1250 | #define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) |
| 1251 | |
 | 1252 | /* Find an entry in the lowest level page table. */
| 1253 | #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr)) |
| 1254 | #define pte_offset_kernel(pmd, address) pte_offset(pmd,address) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1255 | #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1256 | #define pte_unmap(pte) do { } while (0) |
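
/*
 * Sketch only: a software walk down to the pte of "address" with the
 * index/offset helpers above. The function name is hypothetical and
 * error handling is reduced to the *_none() checks.
 */
#if 0
static pte_t *example_walk(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_map(pmd, address);	/* pte_unmap() when done */
}
#endif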
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1257 | |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1258 | static inline void __pmd_idte(unsigned long address, pmd_t *pmdp) |
| 1259 | { |
| 1260 | unsigned long sto = (unsigned long) pmdp - |
| 1261 | pmd_index(address) * sizeof(pmd_t); |
| 1262 | |
| 1263 | if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) { |
| 1264 | asm volatile( |
| 1265 | " .insn rrf,0xb98e0000,%2,%3,0,0" |
| 1266 | : "=m" (*pmdp) |
| 1267 | : "m" (*pmdp), "a" (sto), |
| 1268 | "a" ((address & HPAGE_MASK)) |
| 1269 | : "cc" |
| 1270 | ); |
| 1271 | } |
| 1272 | } |
| 1273 | |
Gerald Schaefer | 75077af | 2012-10-08 16:30:15 -0700 | [diff] [blame] | 1274 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
Gerald Schaefer | d8e7a33 | 2012-10-25 17:42:50 +0200 | [diff] [blame] | 1275 | |
| 1276 | #define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE) |
| 1277 | #define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO) |
| 1278 | #define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW) |
| 1279 | |
Gerald Schaefer | 9501d09 | 2012-10-08 16:30:18 -0700 | [diff] [blame] | 1280 | #define __HAVE_ARCH_PGTABLE_DEPOSIT |
| 1281 | extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable); |
| 1282 | |
| 1283 | #define __HAVE_ARCH_PGTABLE_WITHDRAW |
| 1284 | extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm); |
| 1285 | |
Gerald Schaefer | 75077af | 2012-10-08 16:30:15 -0700 | [diff] [blame] | 1286 | static inline int pmd_trans_splitting(pmd_t pmd) |
| 1287 | { |
| 1288 | return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT; |
| 1289 | } |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1290 | |
| 1291 | static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, |
| 1292 | pmd_t *pmdp, pmd_t entry) |
| 1293 | { |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1294 | if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1) |
| 1295 | pmd_val(entry) |= _SEGMENT_ENTRY_CO; |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1296 | *pmdp = entry; |
| 1297 | } |
| 1298 | |
| 1299 | static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) |
| 1300 | { |
Gerald Schaefer | d8e7a33 | 2012-10-25 17:42:50 +0200 | [diff] [blame] | 1301 | /* |
| 1302 | * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx) |
| 1303 | * Convert to segment table entry format. |
| 1304 | */ |
| 1305 | if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE)) |
| 1306 | return pgprot_val(SEGMENT_NONE); |
| 1307 | if (pgprot_val(pgprot) == pgprot_val(PAGE_RO)) |
| 1308 | return pgprot_val(SEGMENT_RO); |
| 1309 | return pgprot_val(SEGMENT_RW); |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1310 | } |
| 1311 | |
| 1312 | static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) |
| 1313 | { |
| 1314 | pmd_val(pmd) &= _SEGMENT_CHG_MASK; |
| 1315 | pmd_val(pmd) |= massage_pgprot_pmd(newprot); |
| 1316 | return pmd; |
| 1317 | } |
| 1318 | |
| 1319 | static inline pmd_t pmd_mkhuge(pmd_t pmd) |
| 1320 | { |
| 1321 | pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; |
| 1322 | return pmd; |
| 1323 | } |
| 1324 | |
| 1325 | static inline pmd_t pmd_mkwrite(pmd_t pmd) |
| 1326 | { |
Gerald Schaefer | d8e7a33 | 2012-10-25 17:42:50 +0200 | [diff] [blame] | 1327 | /* Do not clobber _HPAGE_TYPE_NONE pages! */ |
| 1328 | if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV)) |
| 1329 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO; |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1330 | return pmd; |
| 1331 | } |
| 1332 | |
| 1333 | static inline pmd_t pmd_wrprotect(pmd_t pmd) |
| 1334 | { |
| 1335 | pmd_val(pmd) |= _SEGMENT_ENTRY_RO; |
| 1336 | return pmd; |
| 1337 | } |
| 1338 | |
| 1339 | static inline pmd_t pmd_mkdirty(pmd_t pmd) |
| 1340 | { |
| 1341 | /* No dirty bit in the segment table entry. */ |
| 1342 | return pmd; |
| 1343 | } |
| 1344 | |
| 1345 | static inline pmd_t pmd_mkold(pmd_t pmd) |
| 1346 | { |
| 1347 | /* No referenced bit in the segment table entry. */ |
| 1348 | return pmd; |
| 1349 | } |
| 1350 | |
| 1351 | static inline pmd_t pmd_mkyoung(pmd_t pmd) |
| 1352 | { |
| 1353 | /* No referenced bit in the segment table entry. */ |
| 1354 | return pmd; |
| 1355 | } |
| 1356 | |
| 1357 | #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG |
| 1358 | static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, |
| 1359 | unsigned long address, pmd_t *pmdp) |
| 1360 | { |
| 1361 | unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK; |
| 1362 | long tmp, rc; |
| 1363 | int counter; |
| 1364 | |
| 1365 | rc = 0; |
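	/*
	 * A huge pmd covers PTRS_PER_PTE 4K pages. rrbm tests and resets
	 * the reference bits of 64 pages per execution, rrbe handles one
	 * page at a time; either loop covers the whole segment.
	 */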
| 1366 | if (MACHINE_HAS_RRBM) { |
| 1367 | counter = PTRS_PER_PTE >> 6; |
| 1368 | asm volatile( |
| 1369 | "0: .insn rre,0xb9ae0000,%0,%3\n" /* rrbm */ |
| 1370 | " ogr %1,%0\n" |
| 1371 | " la %3,0(%4,%3)\n" |
| 1372 | " brct %2,0b\n" |
| 1373 | : "=&d" (tmp), "+&d" (rc), "+d" (counter), |
| 1374 | "+a" (pmd_addr) |
| 1375 | : "a" (64 * 4096UL) : "cc"); |
| 1376 | rc = !!rc; |
| 1377 | } else { |
| 1378 | counter = PTRS_PER_PTE; |
| 1379 | asm volatile( |
| 1380 | "0: rrbe 0,%2\n" |
| 1381 | " la %2,0(%3,%2)\n" |
| 1382 | " brc 12,1f\n" |
| 1383 | " lhi %0,1\n" |
| 1384 | "1: brct %1,0b\n" |
| 1385 | : "+d" (rc), "+d" (counter), "+a" (pmd_addr) |
| 1386 | : "a" (4096UL) : "cc"); |
| 1387 | } |
| 1388 | return rc; |
| 1389 | } |
| 1390 | |
| 1391 | #define __HAVE_ARCH_PMDP_GET_AND_CLEAR |
| 1392 | static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, |
| 1393 | unsigned long address, pmd_t *pmdp) |
| 1394 | { |
| 1395 | pmd_t pmd = *pmdp; |
| 1396 | |
| 1397 | __pmd_idte(address, pmdp); |
| 1398 | pmd_clear(pmdp); |
| 1399 | return pmd; |
| 1400 | } |
| 1401 | |
| 1402 | #define __HAVE_ARCH_PMDP_CLEAR_FLUSH |
| 1403 | static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma, |
| 1404 | unsigned long address, pmd_t *pmdp) |
| 1405 | { |
| 1406 | return pmdp_get_and_clear(vma->vm_mm, address, pmdp); |
| 1407 | } |
| 1408 | |
| 1409 | #define __HAVE_ARCH_PMDP_INVALIDATE |
| 1410 | static inline void pmdp_invalidate(struct vm_area_struct *vma, |
| 1411 | unsigned long address, pmd_t *pmdp) |
| 1412 | { |
| 1413 | __pmd_idte(address, pmdp); |
| 1414 | } |
| 1415 | |
Gerald Schaefer | be32865 | 2013-01-21 16:48:07 +0100 | [diff] [blame] | 1416 | #define __HAVE_ARCH_PMDP_SET_WRPROTECT |
| 1417 | static inline void pmdp_set_wrprotect(struct mm_struct *mm, |
| 1418 | unsigned long address, pmd_t *pmdp) |
| 1419 | { |
| 1420 | pmd_t pmd = *pmdp; |
| 1421 | |
| 1422 | if (pmd_write(pmd)) { |
| 1423 | __pmd_idte(address, pmdp); |
| 1424 | set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd)); |
| 1425 | } |
| 1426 | } |
| 1427 | |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1428 | static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) |
| 1429 | { |
| 1430 | pmd_t __pmd; |
| 1431 | pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); |
| 1432 | return __pmd; |
| 1433 | } |
| 1434 | |
| 1435 | #define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot)) |
| 1436 | #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) |
| 1437 | |
| 1438 | static inline int pmd_trans_huge(pmd_t pmd) |
| 1439 | { |
| 1440 | return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE; |
| 1441 | } |
| 1442 | |
| 1443 | static inline int has_transparent_hugepage(void) |
| 1444 | { |
| 1445 | return MACHINE_HAS_HPAGE ? 1 : 0; |
| 1446 | } |
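
/*
 * Sketch only: a THP fault would assemble and install a huge pmd from
 * these helpers roughly like this; page, vma, mm, haddr and pmdp are
 * assumed from the fault code.
 */
#if 0
	pmd_t pmd = pmd_mkhuge(mk_pmd(page, vma->vm_page_prot));
	set_pmd_at(mm, haddr, pmdp, pmd);
#endif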
| 1447 | |
| 1448 | static inline unsigned long pmd_pfn(pmd_t pmd) |
| 1449 | { |
Gerald Schaefer | 171c400 | 2013-01-09 18:49:51 +0100 | [diff] [blame] | 1450 | return pmd_val(pmd) >> PAGE_SHIFT; |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1451 | } |
Gerald Schaefer | 75077af | 2012-10-08 16:30:15 -0700 | [diff] [blame] | 1452 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
| 1453 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1454 | /* |
| 1455 | * 31 bit swap entry format: |
| 1456 | * A page-table entry has some bits we have to treat in a special way. |
 | 1457 |  * Bits 0, 20 and 23 have to be zero, otherwise a specification
 | 1458 |  * exception will occur instead of a page translation exception. The
 | 1459 |  * specification exception has the bad habit of not storing the
 | 1460 |  * necessary information in the lowcore.
 | 1461 |  * Bits 21 and 22 are the page invalid bit and the page protection
 | 1462 |  * bit. We set both to indicate a swapped page.
 | 1463 |  * Bits 30 and 31 are used to distinguish the different page types. For
 | 1464 |  * a swapped page these bits need to be zero.
 | 1465 |  * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 | 1466 |  * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 | 1467 |  * plus bit 24 for the offset.
| 1468 | * 0| offset |0110|o|type |00| |
| 1469 | * 0 0000000001111111111 2222 2 22222 33 |
| 1470 | * 0 1234567890123456789 0123 4 56789 01 |
| 1471 | * |
| 1472 | * 64 bit swap entry format: |
| 1473 | * A page-table entry has some bits we have to treat in a special way. |
 | 1474 |  * Bits 52 and 55 have to be zero, otherwise a specification
 | 1475 |  * exception will occur instead of a page translation exception. The
 | 1476 |  * specification exception has the bad habit of not storing the
 | 1477 |  * necessary information in the lowcore.
 | 1478 |  * Bits 53 and 54 are the page invalid bit and the page protection
 | 1479 |  * bit. We set both to indicate a swapped page.
 | 1480 |  * Bits 62 and 63 are used to distinguish the different page types. For
 | 1481 |  * a swapped page these bits need to be zero.
 | 1482 |  * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 | 1483 |  * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 | 1484 |  * plus bit 56 for the offset.
| 1485 | * | offset |0110|o|type |00| |
| 1486 | * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 |
| 1487 | * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 |
| 1488 | */ |
Heiko Carstens | f4815ac | 2012-05-23 16:24:51 +0200 | [diff] [blame] | 1489 | #ifndef CONFIG_64BIT |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1490 | #define __SWP_OFFSET_MASK (~0UL >> 12) |
| 1491 | #else |
| 1492 | #define __SWP_OFFSET_MASK (~0UL >> 11) |
| 1493 | #endif |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 1494 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1495 | { |
| 1496 | pte_t pte; |
| 1497 | offset &= __SWP_OFFSET_MASK; |
Gerald Schaefer | 9282ed9 | 2006-09-20 15:59:37 +0200 | [diff] [blame] | 1498 | pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1499 | ((offset & 1UL) << 7) | ((offset & ~1UL) << 11); |
| 1500 | return pte; |
| 1501 | } |
| 1502 | |
| 1503 | #define __swp_type(entry) (((entry).val >> 2) & 0x1f) |
| 1504 | #define __swp_offset(entry) (((entry).val >> 11) | (((entry).val >> 7) & 1)) |
| 1505 | #define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) }) |
| 1506 | |
| 1507 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) |
| 1508 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) |
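
/*
 * Sketch only: a swap entry round-trips through the pte encoding, i.e.
 * type and offset survive __swp_entry_to_pte/__pte_to_swp_entry.
 */
#if 0
	swp_entry_t entry = __swp_entry(type, offset);
	pte_t pte = __swp_entry_to_pte(entry);

	BUG_ON(__swp_type(__pte_to_swp_entry(pte)) != type);
	BUG_ON(__swp_offset(__pte_to_swp_entry(pte)) != offset);
#endif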
| 1509 | |
Heiko Carstens | f4815ac | 2012-05-23 16:24:51 +0200 | [diff] [blame] | 1510 | #ifndef CONFIG_64BIT |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1511 | # define PTE_FILE_MAX_BITS 26 |
Heiko Carstens | f4815ac | 2012-05-23 16:24:51 +0200 | [diff] [blame] | 1512 | #else /* CONFIG_64BIT */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1513 | # define PTE_FILE_MAX_BITS 59 |
Heiko Carstens | f4815ac | 2012-05-23 16:24:51 +0200 | [diff] [blame] | 1514 | #endif /* CONFIG_64BIT */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1515 | |
| 1516 | #define pte_to_pgoff(__pte) \ |
| 1517 | ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f)) |
| 1518 | |
| 1519 | #define pgoff_to_pte(__off) \ |
| 1520 | ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \ |
Gerald Schaefer | 9282ed9 | 2006-09-20 15:59:37 +0200 | [diff] [blame] | 1521 | | _PAGE_TYPE_FILE }) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1522 | |
| 1523 | #endif /* !__ASSEMBLY__ */ |
| 1524 | |
| 1525 | #define kern_addr_valid(addr) (1) |
| 1526 | |
Heiko Carstens | 17f3458 | 2008-04-30 13:38:47 +0200 | [diff] [blame] | 1527 | extern int vmem_add_mapping(unsigned long start, unsigned long size); |
| 1528 | extern int vmem_remove_mapping(unsigned long start, unsigned long size); |
Carsten Otte | 402b086 | 2008-03-25 18:47:10 +0100 | [diff] [blame] | 1529 | extern int s390_enable_sie(void); |
Heiko Carstens | f4eb07c | 2006-12-08 15:56:07 +0100 | [diff] [blame] | 1530 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1531 | /* |
| 1532 | * No page table caches to initialise |
| 1533 | */ |
| 1534 | #define pgtable_cache_init() do { } while (0) |
| 1535 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1536 | #include <asm-generic/pgtable.h> |
| 1537 | |
| 1538 | #endif /* _S390_PAGE_H */ |