/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup.
 * For s390 64 bit we use up to four of the five levels the hardware
 * provides (region first tables are not used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern void vmem_map_init(void);
pmd_t *vmem_pmd_alloc(void);
pte_t *vmem_pte_alloc(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

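/*
 * Illustrative sketch (not part of this header): code that maps or
 * unmaps kernel pages in the direct mapping is expected to keep the
 * counters above in sync, e.g. after splitting one 1M segment mapping
 * into 256 4K page mappings:
 *
 *	update_page_count(PG_DIRECT_MAP_1M, -1);
 *	update_page_count(PG_DIRECT_MAP_4K, 256);
 *
 * arch_report_meminfo() then reports the totals via /proc/meminfo.
 */
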
/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT	20
#define PUD_SHIFT	31
#define P4D_SHIFT	42
#define PGDIR_SHIFT	53

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define P4D_SIZE	(1UL << P4D_SHIFT)
#define P4D_MASK	(~(P4D_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

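/*
 * Worked out, the shifts above give the area that one entry at each
 * level maps (pure arithmetic on the definitions):
 *
 *	PMD_SIZE   = 1UL << 20 = 1M  (one segment table entry)
 *	PUD_SIZE   = 1UL << 31 = 2G  (one region third table entry)
 *	P4D_SIZE   = 1UL << 42 = 4T  (one region second table entry)
 *	PGDIR_SIZE = 1UL << 53 = 8P  (one region first table entry)
 */
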
/*
 * Entries per page directory level. The s390 hardware combines
 * 256-entry page tables with 2048-entry segment and region tables,
 * so there are 256 ptes per pmd and 2048 entries at each higher level.
 */
#define PTRS_PER_PTE	256
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#define PTRS_PER_P4D	2048
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

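/*
 * Minimal usage sketch, assuming a caller that has a kernel text
 * address in hand (the surrounding identifiers are hypothetical):
 *
 *	if (is_module_addr((void *) regs->psw.addr))
 *		... address lies within the 2GB module area ...
 */
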
/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |	P-table origin				      |	 TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |	S-table origin				     |	 TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |	   region table origin			  |  DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit */
#define _PAGE_PROTECT	0x200		/* HW read-only bit */
#define _PAGE_INVALID	0x400		/* HW invalid bit */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */

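/*
 * A worked example of the table above (pure arithmetic on the bit
 * definitions, shown for illustration): a present, read-write, dirty,
 * young pte carries
 *
 *	_PAGE_PRESENT | _PAGE_YOUNG | _PAGE_DIRTY |
 *	_PAGE_READ | _PAGE_WRITE = 0x03d
 *
 * in its low 12 bits, with both _PAGE_INVALID and _PAGE_PROTECT clear,
 * matching the ".00.xx1111.1" line; an empty pte is just _PAGE_INVALID,
 * i.e. 0x400.
 */
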
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission (unless the no-execute
 * facility is installed and used) and write permission implies read
 * permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

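/*
 * Illustrative reading of the tables above (not an addition to the
 * API): a private PROT_READ|PROT_WRITE mapping uses __P011 and thus
 * gets PAGE_RO, so the first write faults and is resolved by
 * copy-on-write; a shared PROT_READ|PROT_WRITE mapping uses __S011
 * and gets PAGE_RW directly.
 */
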
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_WRITE | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE | \
				   _REGION3_ENTRY_READ | \
				   _REGION3_ENTRY_YOUNG | \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * If a guest uses storage keys, faults should no longer be
 * backed by zero pages.
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}

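/*
 * Usage sketch (hypothetical caller): csp() and crdte() both
 * compare-and-replace a DAT table entry and purge stale TLB copies in
 * one operation. Replacing a segment table entry could look like this,
 * assuming pmdp, new and the mm's asce are at hand:
 *
 *	crdte(pmd_val(*pmdp), new, (unsigned long) pmdp,
 *	      CRDTE_DTT_SEGMENT, addr, mm->context.asce);
 */
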
/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return pmd_bad(__pmd(pud_val(pud)));
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return pud_bad(__pud(p4d_val(p4d)));
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;

	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;

	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

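/*
 * Illustrative walk-through of pte_modify() (follows directly from the
 * code above): when mprotect() switches an old, clean pte to PAGE_RW,
 * the function keeps only the bits in _PAGE_CHG_MASK and ORs in the new
 * protection, which includes _PAGE_INVALID and _PAGE_PROTECT. Since the
 * pte is neither young nor dirty, neither bit is cleared again: the
 * next access faults first, letting the kernel track young and dirty
 * state in software before the page becomes accessible.
 */
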
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

static inline void __ptep_ipte(unsigned long address, pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + TLB flush for the pte */
	asm volatile(
		"	.insn rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
		: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
		  [m4] "i" (local));
}

static inline void __ptep_ipte_range(unsigned long address, int nr,
				     pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

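/*
 * Usage sketch (hypothetical caller): the m4 operand selects the scope
 * of the TLB flush, so invalidating one pte for all CPUs versus only
 * the local CPU looks like
 *
 *	__ptep_ipte(addr, ptep, IPTE_GLOBAL);
 *	__ptep_ipte(addr, ptep, IPTE_LOCAL);
 *
 * IPTE_LOCAL is only safe when no other CPU can hold a TLB entry
 * for the pte.
 */
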
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);

1024#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
Martin Schwidefskyf0e47c22007-07-17 04:03:03 -07001025static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001026 unsigned long addr, pte_t *ptep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001027{
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001028 return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029}
1030
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001031/*
1032 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
1033 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
1034 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
1035 * cannot be accessed while the batched unmap is running. In this case
1036 * full==1 and a simple pte_clear is enough. See tlb.h.
1037 */
1038#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1039static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001040 unsigned long addr,
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001041 pte_t *ptep, int full)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001042{
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001043 if (full) {
1044 pte_t pte = *ptep;
1045 *ptep = __pte(_PAGE_INVALID);
1046 return pte;
Martin Schwidefskyd3383632013-04-17 10:53:39 +02001047 }
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001048 return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001049}
1050
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001051#define __HAVE_ARCH_PTEP_SET_WRPROTECT
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001052static inline void ptep_set_wrprotect(struct mm_struct *mm,
1053 unsigned long addr, pte_t *ptep)
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001054{
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001055 pte_t pte = *ptep;
1056
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001057 if (pte_write(pte))
1058 ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001059}
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001060
1061#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001062static inline int ptep_set_access_flags(struct vm_area_struct *vma,
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001063 unsigned long addr, pte_t *ptep,
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001064 pte_t entry, int dirty)
1065{
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001066 if (pte_same(*ptep, entry))
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001067 return 0;
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001068 ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001069 return 1;
1070}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071
/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);

Martin Schwidefskyebde7652016-03-08 11:08:09 +01001105/*
1106 * Certain architectures need to do special things when PTEs
1107 * within a page table are directly modified. Thus, the following
1108 * hook is made available.
1109 */
1110static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1111 pte_t *ptep, pte_t entry)
1112{
Martin Schwidefsky57d7f932016-03-22 10:54:24 +01001113 if (!MACHINE_HAS_NX)
1114 pte_val(entry) &= ~_PAGE_NOEXEC;
Christian Borntraegera8f60d12017-04-09 22:09:38 +02001115 if (pte_present(entry))
1116 pte_val(entry) &= ~_PAGE_UNUSED;
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001117 if (mm_has_pgste(mm))
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01001118 ptep_set_pte_at(mm, addr, ptep, entry);
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001119 else
1120 *ptep = entry;
1121}
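
/*
 * Usage sketch (illustrative only, not a kernel API contract; "vma",
 * "addr" and "page" are hypothetical locals, and locking and fault
 * plumbing are omitted): callers construct the pte first and then
 * install it via set_pte_at(), so the pgste bookkeeping for KVM guest
 * mms is never bypassed:
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 */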

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}
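
/*
 * A pte is just the physical page address plus the protection bits;
 * mk_pte_phys() additionally marks the result young, so the first
 * access does not fault merely to set the referenced state. A sketch
 * with a hypothetical physical address:
 *
 *	pte_t pte = mk_pte_phys(0x12345000UL, PAGE_RO);
 *
 * yields a read-only pte mapping the 4K page at 0x12345000.
 */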

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(p4d) (p4d_val(p4d) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	p4d_t *p4d = (p4d_t *) pgd;

	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		p4d = (p4d_t *) pgd_deref(*pgd);
	return p4d + p4d_index(address);
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	pud_t *pud = (pud_t *) p4d;

	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) p4d_deref(*p4d);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;

	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}
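
/*
 * The three offset functions above implement the dynamic folding of
 * the upper page table levels: if an entry does not carry the region
 * type that would call for one more level of indirection, the incoming
 * table pointer is simply reinterpreted as a table of the next lower
 * level instead of being dereferenced.
 */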
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184
Martin Schwidefsky190a1d72007-10-22 12:52:48 +02001185#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
1186#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1187#define pte_page(x) pfn_to_page(pte_pfn(x))
1188
Martin Schwidefsky152125b2014-07-24 11:03:41 +02001189#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
Gerald Schaeferd08de8e2016-07-04 14:47:01 +02001190#define pud_page(pud) pfn_to_page(pud_pfn(pud))
Martin Schwidefsky1aea9b32017-04-24 18:19:10 +02001191#define p4d_page(pud) pfn_to_page(p4d_pfn(p4d))
Martin Schwidefsky190a1d72007-10-22 12:52:48 +02001192
1193/* Find an entry in the lowest level page table.. */
1194#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
1195#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197#define pte_unmap(pte) do { } while (0)
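
/*
 * Putting the walk together (a sketch; "mm" and "addr" are
 * hypothetical, and real callers also need the usual
 * pgd_none()/p4d_none()/pud_none()/pmd_none() and bad-entry checks
 * plus page table locking, all omitted here):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);
 *	pud_t *pud = pud_offset(p4d, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 */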

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}
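
/*
 * As the code above shows, the dirty state of large pmds (and of large
 * puds below) is tracked in software: pmd_mkclean() also sets the
 * protect bit, so the next write takes a protection fault, through
 * which the entry is marked dirty again and the protection is dropped
 * (pmd_mkdirty()).
 */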

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

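/*
 * The .insn rrf,0xb98e0000 opcode used below is IDTE (invalidate DAT
 * table entry); the m4 field selects whether only the TLB of the local
 * CPU (IDTE_LOCAL) or the TLBs of all CPUs in the configuration
 * (IDTE_GLOBAL) are cleared of entries built from the invalidated
 * table entry.
 */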
static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp, int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
		: "+m" (*pmdp)
		: [r1] "a" (sto), [r2] "a" ((address & HPAGE_MASK)),
		  [m4] "i" (local)
		: "cc");
}

static inline void __pudp_idte(unsigned long address, pud_t *pudp, int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	asm volatile(
		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
		: "+m" (*pudp)
		: [r1] "a" (r3o), [r2] "a" ((address & PUD_MASK)),
		  [m4] "i" (local)
		: "cc");
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
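
/*
 * In the "full" case the address space is being torn down and no other
 * user of the page table can exist any more, so the pmd can be read
 * and cleared with plain accesses instead of the atomic exchange and
 * TLB flushing done by pmdp_xchg_lazy().
 */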

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp)
{
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification exception
 * will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200.
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |                       offset                       |01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}
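
/*
 * Worked example (a sketch; it relies on _PAGE_INVALID being 0x400 and
 * _PAGE_PROTECT being 0x200, as defined earlier in this file):
 * mk_swap_pte(5, 0x1000) gives
 *
 *	0x400 | 0x200 | (0x1000UL << 12) | (5 << 2) = 0x0000000001000614
 *
 * i.e. bit 53 (_PAGE_INVALID) and bit 54 (_PAGE_PROTECT) are set and
 * the swap pattern (pte & 0x201) == 0x200 holds.
 */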

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr) (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */