/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup.
 * For s390 64 bit we use up to four of the five levels the hardware
 * provides (region first tables are not used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc. On s390 there are several
 * colored zero pages; zero_page_mask selects the one that matches
 * the low bits of the virtual address.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PUD_SHIFT determines the size of the area a third-level page
 * table can map
 * PGDIR_SHIFT determines what a fourth-level page table entry can map
 */
#define PMD_SHIFT	20
#define PUD_SHIFT	31
#define PGDIR_SHIFT	42

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the S390 hardware provides five
 * table levels, of which Linux uses up to four. A page table holds
 * 256 pte entries; the segment, region third and region second
 * tables hold 2048 entries each.
 */
#define PTRS_PER_PTE	256
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#define PTRS_PER_PGD	2048
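
/*
 * Worked example of the table geometry above: a page table maps
 * 256 * 4KB = 1MB, which matches PMD_SIZE (1UL << 20); a segment table
 * maps 2048 * 1MB = 2GB, which matches PUD_SIZE (1UL << 31); a region
 * third table maps 2048 * 2GB = 4TB, which matches PGDIR_SIZE
 * (1UL << 42); and a full region second table spans 2048 * 4TB = 8PB
 * of virtual address space.
 */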

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module areas always reside in the topmost part of
 * the kernel mapping. We reserve 128GB (64 bit) for vmalloc and
 * modules. A 2GB area at the top of the vmalloc area is set aside for
 * modules. That makes sure that inter-module branches always happen
 * without trampolines and, in addition, the placement within a 2GB
 * frame is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |                       PFRA                       |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                            |  TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |      S-table origin                         |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                      |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.10.xx0010.1
 * prot-none, dirty, young	.10.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
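
/*
 * Illustration of the three summary patterns above: they correspond
 * one to one to the query functions defined later in this file, i.e.
 *
 *	pte_none(pte)	 : pte_val(pte) == _PAGE_INVALID
 *	pte_swap(pte)	 : (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
 *			   == _PAGE_PROTECT
 *	pte_present(pte) : (pte_val(pte) & _PAGE_PRESENT) != 0
 */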

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_READ	0x0002	/* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE	0x0001	/* SW segment write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

/*
 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...rw
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...10
 * read-only, clean, young	01..1...0...10
 * read-only, dirty, old	10..1...1...10
 * read-only, dirty, young	11..1...0...10
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
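
/*
 * Note on the encoding above: it is what keeps the pmd query functions
 * below cheap. pmd_write() only needs to test _SEGMENT_ENTRY_WRITE and
 * pmd_dirty() only _SEGMENT_ENTRY_DIRTY, while the pmd_mk* functions
 * keep the hardware PROTECT and INVALID bits consistent with the
 * software read/write/young/dirty bits.
 */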

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages.
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

457static inline int pud_present(pud_t pud)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700458{
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100459 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
460 return 1;
Martin Schwidefsky0d017922007-12-17 16:25:48 +0100461 return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462}
463
Martin Schwidefsky190a1d72007-10-22 12:52:48 +0200464static inline int pud_none(pud_t pud)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465{
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100466 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
467 return 0;
Martin Schwidefskye5098612013-07-23 20:57:57 +0200468 return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700469}
470
Heiko Carstens18da2362012-10-08 09:18:26 +0200471static inline int pud_large(pud_t pud)
472{
473 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
474 return 0;
475 return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
476}
477
static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
	preempt_enable();
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

bool pgste_test_and_clear_dirty(struct mm_struct *, unsigned long address);
void ptep_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);

/**
 * struct gmap - guest address space
 * @list: list head for the per-mm list of gmaps
 * @crst_list: list of all crst tables used in the guest address space
 * @mm: pointer to the parent mm_struct
 * @guest_to_host: radix tree with guest to host address translation
 * @host_to_guest: radix tree with pointer to segment table entries
 * @guest_table_lock: spinlock to protect all entries in the guest page table
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @asce_end: end address of the gmap address space
 * @private: private data attached by the user of the gmap (e.g. KVM)
 * @pfault_enabled: defines if pfaults are applicable for the guest
 */
struct gmap {
	struct list_head list;
	struct list_head crst_list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
};

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
};

struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
	 * invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_READ and PAGE_WRITE has the page protection
	 * bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + global TLB flush for the pte */
	asm volatile(
		"	ipte	%2,%3"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + local TLB flush for the pte */
	asm volatile(
		"	.insn rrf,0xb2210000,%2,%3,0,1"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + global TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%2,%0,%1,0"
			: "+a" (address), "+a" (nr) : "a" (pto) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop. (An illustrative sketch of this sequence follows
 * set_pte_at() below.)
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;
		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

void set_pte_pgste_at(struct mm_struct *, unsigned long, pte_t *, pte_t);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (mm_has_pgste(mm))
		set_pte_pgste_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
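
/*
 * Illustrative sketch of a software page table walk using the helpers
 * above; the function name is hypothetical and not part of the kernel
 * API. A real walker must hold the appropriate page table locks.
 */
static inline pte_t *__example_walk(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return NULL;	/* large pmds map 1MB directly, no pte level */
	return pte_offset_map(pmd, addr);
}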

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
		return pgprot_val(SEGMENT_READ);
	return pgprot_val(SEGMENT_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001133static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1134{
Martin Schwidefsky152125b2014-07-24 11:03:41 +02001135 if (pmd_large(pmd)) {
1136 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
1137 _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
Kirill A. Shutemovfecffad2016-01-15 16:53:24 -08001138 _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
Martin Schwidefsky152125b2014-07-24 11:03:41 +02001139 pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1140 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1141 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1142 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
1143 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1144 return pmd;
1145 }
1146 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001147 pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1148 return pmd;
1149}
1150
Gerald Schaefer106c9922013-04-29 15:07:23 -07001151static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001152{
Gerald Schaefer106c9922013-04-29 15:07:23 -07001153 pmd_t __pmd;
1154 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
Martin Schwidefsky152125b2014-07-24 11:03:41 +02001155 return __pmd;
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001156}
1157
Gerald Schaefer106c9922013-04-29 15:07:23 -07001158#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1159
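/*
 * Two low-level flush primitives, described as they appear in the
 * code: __pmdp_csp() uses the compare-and-swap-and-purge (CSP)
 * instruction to replace the segment table entry with its invalid
 * form and purge the TLBs, while __pmdp_idte()/__pmdp_idte_local()
 * use the invalidate-DAT-table-entry (IDTE) instruction. The only
 * difference between the two IDTE variants is the last operand of
 * the .insn, selecting clearing on all CPUs (0) or locally (1).
 */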
static inline void __pmdp_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INVALID;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}

static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,0"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,1"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
}

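/*
 * When "full" is set the mapping is being torn down and no other user
 * of the page table can be active, so (as the code below does) a plain
 * store of the invalid pattern is enough; otherwise the exchange goes
 * through pmdp_xchg_lazy() to keep the flushing semantics.
 */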
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_INVALID);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp)
{
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bit 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |                       offset                       |01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}
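
/*
 * Worked example, following the definitions above: mk_swap_pte(5, 0x1000)
 * yields _PAGE_INVALID | _PAGE_PROTECT (0x600), plus 0x1000 << 12
 * (0x1000000), plus 5 << 2 (0x14), i.e. pte_val == 0x1000614; and
 * (0x1000614 & 0x201) == 0x200, matching the swap pte bit pattern.
 */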

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr) (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */