Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
H. Peter Anvin | 1965aae | 2008-10-22 22:26:29 -0700 | [diff] [blame] | 2 | #ifndef _ASM_X86_PGTABLE_2LEVEL_H |
| 3 | #define _ASM_X86_PGTABLE_2LEVEL_H |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4 | |
/*
 * Error-reporting helpers used by the generic pagetable code when it
 * encounters a corrupted entry.  With 2-level (non-PAE) paging both a
 * pte and a pgd fit in 32 bits, hence the %08lx format.
 */
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx\n", __FILE__, __LINE__, pgd_val(e))
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9 | |
| 10 | /* |
| 11 | * Certain architectures need to do special things when PTEs |
| 12 | * within a page table are directly modified. Thus, the following |
| 13 | * hook is made available. |
| 14 | */ |
/*
 * Install a pte.  On 2-level paging a pte is a single 32-bit word, so a
 * plain store is sufficient: there is no word-tearing the hardware page
 * walker could observe.
 */
static inline void native_set_pte(pte_t *ptep , pte_t pte)
{
	*ptep = pte;
}
Jeremy Fitzhardinge | 4891645 | 2008-01-30 13:32:58 +0100 | [diff] [blame] | 19 | |
/*
 * Install a pmd entry.  Like a pte, a pmd is a single 32-bit word here,
 * so one store updates it atomically with respect to the page walker.
 */
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}
Rusty Russell | da181a8 | 2006-12-07 02:14:08 +0100 | [diff] [blame] | 24 | |
/*
 * Intentional no-op: with 2-level paging the pud level is folded, so
 * there is no separate pud entry to write.  Presumably present only to
 * satisfy the generic pagetable interface — TODO confirm against callers.
 */
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
}
| 28 | |
/*
 * Atomically install a pte.  Since a 2-level pte is one naturally
 * aligned 32-bit word, the plain store in native_set_pte() is already
 * atomic; no special sequence is needed (unlike PAE).
 */
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 33 | |
/*
 * Clear a pmd entry by storing the zero (not-present) value.
 */
static inline void native_pmd_clear(pmd_t *pmdp)
{
	native_set_pmd(pmdp, __pmd(0));
}
Zachary Amsden | 6e5882c | 2006-04-27 11:32:29 -0700 | [diff] [blame] | 38 | |
/*
 * Intentional no-op: the pud level is folded with 2-level paging, so
 * there is nothing to clear (see native_set_pud()).
 */
static inline void native_pud_clear(pud_t *pudp)
{
}
| 42 | |
/*
 * Clear a pte.  @mm and @addr are unused here but are part of the
 * generic interface (other implementations need them).
 */
static inline void native_pte_clear(struct mm_struct *mm,
				    unsigned long addr, pte_t *xp)
{
	*xp = native_make_pte(0);
}
| 48 | |
#ifdef CONFIG_SMP
/*
 * Atomically read and clear a pte.  The xchg makes the fetch and the
 * clear one indivisible operation, so a concurrent hardware update of
 * the accessed/dirty bits on another CPU cannot slip in between and be
 * lost.
 */
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
	return __pte(xchg(&xp->pte_low, 0));
}
#else
/* UP: no other CPU can race us, the cheaper non-atomic form suffices. */
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
Rusty Russell | 6049742 | 2006-09-25 23:32:30 -0700 | [diff] [blame] | 57 | |
#ifdef CONFIG_SMP
/*
 * Atomically read and clear a pmd entry; same xchg rationale as
 * native_ptep_get_and_clear() above.
 */
static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
	return __pmd(xchg((pmdval_t *)xp, 0));
}
#else
/* UP: no cross-CPU race possible, use the non-atomic local variant. */
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
| 66 | |
#ifdef CONFIG_SMP
/*
 * Atomically read and clear a pud entry; same xchg rationale as
 * native_ptep_get_and_clear() above.
 */
static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
	return __pud(xchg((pudval_t *)xp, 0));
}
#else
/* UP: no cross-CPU race possible, use the non-atomic local variant. */
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif
| 75 | |
/*
 * Move a bit-field within a word: extract the field by shifting @value
 * right by @rightshift and masking with @mask, then place it at its new
 * position by shifting left by @leftshift.  Helper for the pte/pgoff
 * encode/decode macros below.
 */
static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
				      unsigned long mask, unsigned int leftshift)
{
	unsigned long field = (value >> rightshift) & mask;

	return field << leftshift;
}
| 82 | |
/*
 * Encode and de-code a swap entry.
 *
 * A swap pte must keep _PAGE_PRESENT (and _PAGE_PROTNONE) clear so the
 * hardware never treats it as a valid mapping; hence the swap type is
 * stored in the bits just above _PAGE_BIT_PRESENT and the swap offset
 * in the bits above _PAGE_BIT_PROTNONE.
 */
#define SWP_TYPE_BITS 5
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)

/* Compile-time check that SWP_TYPE_BITS can represent every swapfile. */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (_PAGE_BIT_PRESENT + 1)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (_PAGE_BIT_PRESENT + 1)) \
					 | ((offset) << SWP_OFFSET_SHIFT) })
/* The raw 32-bit pte word and the swp_entry_t value are interchangeable. */
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
/* No inverted PFNs on 2 level page tables */

/*
 * Stub for the L1TF PFN-inversion interface: 2-level pagetables never
 * invert, so the mask is always zero.
 */
static inline u64 protnone_mask(u64 val)
{
	return 0;
}
| 104 | |
/*
 * Stub for the L1TF PFN-inversion interface: no inversion here, so the
 * new value passes through unchanged; @oldval and @mask are ignored.
 */
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
{
	return val;
}
| 109 | |
/*
 * Stub for the L1TF PFN-inversion interface: a 2-level pte never needs
 * its PFN bits inverted.
 */
static inline bool __pte_needs_invert(u64 val)
{
	return false;
}
| 114 | |
H. Peter Anvin | 1965aae | 2008-10-22 22:26:29 -0700 | [diff] [blame] | 115 | #endif /* _ASM_X86_PGTABLE_2LEVEL_H */ |