/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

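/*
 * Illustrative sketch (not part of this header): when the page
 * attribute code splits a 1 MB direct mapping into 256 4K pages, the
 * counters above would be adjusted roughly like this:
 *
 *	update_page_count(PG_DIRECT_MAP_1M, -1);
 *	update_page_count(PG_DIRECT_MAP_4K, 256);
 *
 * arch_report_meminfo() below feeds these counters into /proc/meminfo.
 */
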
struct seq_file;
void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

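/*
 * Sketch of what ZERO_PAGE() evaluates to (illustrative): the kernel
 * keeps several physically contiguous zero pages, and zero_page_mask
 * selects the one whose cache color matches the faulting address, e.g.
 *
 *	struct page *zp = ZERO_PAGE(vmf->address);
 *
 * so read faults on untouched anonymous memory do not all compete for
 * the same cache lines.
 */
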
/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
#define VMALLOC_DEFAULT_SIZE	((128UL << 30) - MODULES_LEN)
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

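/*
 * Usage sketch (hypothetical caller): test whether a text address lies
 * in the module area before walking the module list:
 *
 *	if (is_module_addr((void *)addr))
 *		mod = __module_address(addr);
 *
 * Since MODULES_LEN is capped at 2GB, any two addresses in this area
 * are reachable from each other with a relative branch, which is what
 * the comment above relies on.
 */
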
/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin			      |		 TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin			     |	 TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin			   |	   DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */

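/*
 * Worked example (derived from the defines above): for some page frame
 * number pfn, a read-write, dirty, young pte carries _PAGE_PRESENT |
 * _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_READ | _PAGE_WRITE = 0x03d and has
 * both HW bits (_PAGE_INVALID, _PAGE_PROTECT) clear, matching the
 * ".00.xx1111.1" row:
 *
 *	pte_t pte = __pte((pfn << PAGE_SHIFT) | 0x03d);
 *	// pte_present(pte) == 1, pte_none(pte) == 0, pte_swap(pte) == 0
 */
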
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* region/segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event */
#define _ASCE_REAL_SPACE	0x20	/* real space control */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE		0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* page table origin */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries */

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

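/*
 * Worked example (from the encoding table above): a read-write, dirty,
 * young large pmd carries _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
 * _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_WRITE | _SEGMENT_ENTRY_READ
 * = 0x3403, with _SEGMENT_ENTRY_INVALID and _SEGMENT_ENTRY_PROTECT
 * clear; this matches the "11..0...0...11" row and is exactly what
 * SEGMENT_KERNEL_EXEC below resolves to.
 */
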
/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

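/*
 * Example of the resulting protection-map semantics: a private
 * PROT_READ|PROT_WRITE mapping uses __P011 == PAGE_RO, so the first
 * write faults and is resolved by copy-on-write, while a shared mapping
 * uses __S011 == PAGE_RW and writes through directly. __P010/__S010
 * include read access because write permission implies read permission
 * on s390, as noted above.
 */
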
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_WRITE | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE | \
				   _REGION3_ENTRY_READ | \
				   _REGION3_ENTRY_YOUNG | \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

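/*
 * Illustration of the comparisons above (assumed initial layout): a new
 * mm starts with a three-level page table, i.e. asce_limit ==
 * _REGION2_SIZE (4TB). For such an mm, mm_pud_folded() and
 * mm_p4d_folded() are both true, so the generic page table walkers
 * treat the upper levels as folded and no p4d/pud tables are allocated
 * until the address space limit is upgraded.
 */
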
static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}

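/*
 * Background for the three helpers above: csp and cspg wrap COMPARE
 * AND SWAP AND PURGE (32 and 64 bit operand, the latter encoded via
 * .insn), and crdte wraps COMPARE AND REPLACE DAT TABLE ENTRY. Each
 * replaces the entry only if it still contains "old" and purges the
 * TLBs accordingly. Hypothetical usage sketch for invalidating a
 * segment table entry:
 *
 *	unsigned long old = pmd_val(*pmdp);
 *	cspg((unsigned long *)pmdp, old, old | _SEGMENT_ENTRY_INVALID);
 */
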
/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0)
		return 1;
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

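/*
 * Note on the origin masks used above: a pmd that points to a page
 * table stores a 2KB-aligned table origin (mask ~0x7ffUL), while a
 * large pmd maps 1MB of storage directly and keeps that address 1MB
 * aligned (mask ~0xfffffUL). Sketch for a hypothetical large pmd:
 *
 *	pmd_t pmd = __pmd(0x20100000UL | _SEGMENT_ENTRY_LARGE);
 *	// pmd_pfn(pmd) == 0x20100000UL >> PAGE_SHIFT == 0x20100
 */
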
#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

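/*
 * Worked example for pte_modify() above: changing a young, dirty pte
 * to PAGE_RW keeps _PAGE_YOUNG and _PAGE_DIRTY (both are in
 * _PAGE_CHG_MASK), then both fixups apply, so the result is valid and
 * writable without an extra fault:
 *
 *	pte = pte_modify(pte, PAGE_RW);
 *	// _PAGE_INVALID and _PAGE_PROTECT both end up cleared
 */
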
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

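/*
 * Usage sketch for __ptep_ipte() above: invalidate one pte and flush
 * it from the TLBs of all CPUs, without the NODAT/guest-ASCE options:
 *
 *	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
 *
 * IPTE_LOCAL clears only the issuing CPU's TLB; a caller may use it
 * only if no other CPU can hold TLB entries for this mm.
 */
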
static inline void __ptep_ipte_range(unsigned long address, int nr,
				     pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the TLB needs to be flushed along with the modification of
 * the pte if the pte is active. The only way this can be implemented
 * is to have ptep_get_and_clear do the TLB flush. In exchange
 * flush_tlb_range is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

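/*
 * Sketch of the common-code sequence the comment above describes,
 * roughly as mm/mprotect.c would perform it:
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep); // flushes TLB
 *	set_pte_at(mm, addr, ptep, pte_modify(old, newprot));
 *	flush_tlb_range(vma, start, end);		// nop on s390
 */
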
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;
		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

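/*
 * The _direct/_lazy split used above is implemented in
 * arch/s390/mm/pgtable.c: ptep_xchg_direct() always issues an IPTE,
 * while ptep_xchg_lazy() may defer the flush when no other CPU has the
 * mm attached. Illustrative call for the batched unmap case:
 *
 *	pte_t old = ptep_get_and_clear_full(mm, addr, ptep, 1);
 *	// with full == 1 no per-pte IPTE is issued at all
 */
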
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_NX)
		pte_val(entry) &= ~_PAGE_NOEXEC;
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

Heiko Carstens2dcea572006-09-29 01:58:41 -07001197static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1198{
Heiko Carstens0b2b6e1d2006-10-04 20:02:23 +02001199 unsigned long physpage = page_to_phys(page);
Martin Schwidefskyabf09be2012-11-07 13:17:37 +01001200 pte_t __pte = mk_pte_phys(physpage, pgprot);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201
Martin Schwidefskye5098612013-07-23 20:57:57 +02001202 if (pte_write(__pte) && PageDirty(page))
1203 __pte = pte_mkdirty(__pte);
Martin Schwidefskyabf09be2012-11-07 13:17:37 +01001204 return __pte;
Heiko Carstens2dcea572006-09-29 01:58:41 -07001205}
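
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): installing a present pte the way generic mm code does, by
 * composing mk_pte with the set_pte_at hook above. The function name
 * is hypothetical.
 */
static inline void example_install_pte(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep,
				       struct page *page, pgprot_t prot)
{
	pte_t entry = mk_pte(page, prot);

	/* set_pte_at strips NX/unused bits and routes pgste mms correctly */
	set_pte_at(mm, addr, ptep, entry);
}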

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(p4d) (p4d_val(p4d) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer. A complete walk
 * following this pattern is sketched below, after the pte_offset
 * helpers.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
	return (p4d_t *) pgd;
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(*p4d) + pud_index(address);
	return (pud_t *) p4d;
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(*pud) + pmd_index(address);
	return (pmd_t *) pud;
}

static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
{
	return (pte_t *) pmd_deref(*pmd) + pte_index(address);
}

#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
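
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): a lockless walk from pgd to pte using the offset helpers
 * above, following the sequence described in the pgd_offset comment.
 * Each level is copied with READ_ONCE() before the next helper
 * dereferences it; the helpers fall through when the table hierarchy
 * is shallower than five levels. The function name is hypothetical,
 * and a real caller would need the usual mm locking or GUP-fast style
 * protection.
 */
static inline pte_t *example_walk_to_pte(struct mm_struct *mm,
					 unsigned long addr)
{
	pgd_t *pgdp, pgd;
	p4d_t *p4dp, p4d;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	pgd = READ_ONCE(*pgdp);
	if (pgd_none(pgd))
		return NULL;
	p4dp = p4d_offset(&pgd, addr);
	p4d = READ_ONCE(*p4dp);
	if (p4d_none(p4d))
		return NULL;
	pudp = pud_offset(&p4d, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud) || pud_large(pud))
		return NULL;	/* hole or 2G huge pud: no pte level */
	pmdp = pmd_offset(&pud, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd) || pmd_large(pmd))
		return NULL;	/* hole or 1M huge pmd: no pte level */
	return pte_offset(&pmd, addr);
}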

static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn, pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
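
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): composing a young, dirty, writable huge pmd for a freshly
 * allocated huge page, in the spirit of the generic THP fault path.
 * pmd_mkhuge sets _SEGMENT_ENTRY_PROTECT, which pmd_mkdirty clears
 * again for writable mappings. The function name is hypothetical.
 */
static inline pmd_t example_mk_huge_pmd(struct page *page, pgprot_t prot)
{
	pmd_t pmd = mk_pmd(page, prot);

	pmd = pmd_mkhuge(pmd);		/* mark as 1M large entry */
	pmd = pmd_mkyoung(pmd);		/* referenced: clears INVALID if readable */
	return pmd_mkdirty(pmd);	/* dirty: clears PROTECT if writable */
}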

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bit 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by the bit pattern (pte & 0x201) == 0x200.
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 *	|                       offset                       |01100|type |00|
 *	|0000000000111111111122222222223333333333444444444455|55555|55566|66|
 *	|0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
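
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): round-tripping a swap entry through the encoding above.
 * Type and offset survive unchanged because they live in disjoint bit
 * fields of the invalid pte. The function name is hypothetical.
 */
static inline void example_swap_entry_roundtrip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);
	pte_t pte = __swp_entry_to_pte(entry);

	/* pte now carries _PAGE_INVALID | _PAGE_PROTECT, i.e. a swap pte */
	WARN_ON(__swp_type(__pte_to_swp_entry(pte)) != 3);
	WARN_ON(__swp_offset(__pte_to_swp_entry(pte)) != 0x1234);
}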

#define kern_addr_valid(addr) (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */