/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))
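
/*
 * Example (illustrative, not from this header): mapping code adjusts a
 * protection before establishing a mapping, e.g.
 * pgprot_decrypted(PAGE_KERNEL) for memory that must be shared
 * unencrypted with a device or hypervisor under SME. __sme_set() and
 * __sme_clr() collapse to no-ops when memory encryption is inactive.
 */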

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()	ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

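/*
 * Illustrative sketch (not part of the kernel API): each pte_mk*()
 * helper above returns a new pte_t by value, so they compose freely.
 * The helper name below is hypothetical.
 */
static inline pte_t example_mk_rw_young_dirty(pte_t pte)
{
	return pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
}
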
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot. Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

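/*
 * Illustrative sketch (not part of the kernel API): pte_modify() is
 * the primitive that mprotect()-style code builds on. Everything in
 * _PAGE_CHG_MASK (pfn, dirty, accessed, ...) is preserved; only the
 * protection bits are swapped. The helper name is hypothetical.
 */
static inline pte_t example_make_readonly(pte_t pte)
{
	return pte_modify(pte, PAGE_READONLY);
}
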
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
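
/*
 * Worked example (illustrative): a caller that asked for UC- must not
 * be handed a WB mapping, so
 * is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_UC_MINUS,
 * _PAGE_CACHE_MODE_WB) returns 0, while the reverse combination (a WB
 * request answered with UC-) is not in the deny list and returns 1.
 */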

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the third-level page table. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* To find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here. PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address)))
/*
 * a shortcut to get a pgd_t in a given mm
 */
#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
# define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	if (IS_ENABLED(CONFIG_SMP)) {
		return xchg(pmdp, pmd);
	} else {
		pmd_t old = *pmdp;
		*pmdp = pmd;
		return old;
	}
}
#endif

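/*
 * Example (illustrative): THP code such as a huge-pmd permission
 * change can use pmdp_establish() to install a new pmd and inspect
 * the one it replaced in a single atomic step, e.g.:
 *
 *	old = pmdp_establish(vma, address, pmdp, entry);
 *	if (pmd_dirty(old))
 *		entry = pmd_mkdirty(entry);
 */
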
/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;
	/* Clone the user space pgd as well */
	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
	       count * sizeof(pgd_t));
#endif
}

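/*
 * Example (illustrative): PGD-allocation code copies the kernel half
 * of a reference PGD into a freshly allocated one with exactly this
 * idiom; the helper name is hypothetical.
 */
static inline void example_copy_kernel_pgds(pgd_t *dst, pgd_t *src)
{
	clone_pgd_range(dst + KERNEL_PGD_BOUNDARY,
			src + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);
}
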
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
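
/*
 * Worked example (illustrative; assumes 4 KiB base pages, so
 * PTE_SHIFT == 9, and PG_LEVEL_2M == 2):
 *   page_level_shift(PG_LEVEL_2M) == (12 - 9) + 2 * 9 == 21
 *   page_level_size(PG_LEVEL_2M)  == 1UL << 21 == 2 MiB
 *   page_level_mask(PG_LEVEL_2M)  == ~(2 MiB - 1)
 */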

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}

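/*
 * Worked example (illustrative): each pkey owns two adjacent PKRU
 * bits, so for pkey 3 the AD bit is bit 6 and the WD bit is bit 7.
 * A PKRU value of 0x40 therefore denies all access to pkey 3 while
 * leaving every other key unrestricted, and 0x80 makes pkey 3
 * read-only.
 */
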
static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD. We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */