/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_HUGEPAGE_H
#define _ASM_ARC_HUGEPAGE_H

#include <linux/types.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

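/*
 * A huge page PMD on ARC has the same format as a PTE, so the pmd_*()
 * accessors below are thin wrappers: convert to pte_t, reuse the
 * corresponding pte_*() helper, convert back.
 */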
static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkhuge(pmd)		pte_pmd(pte_mkhuge(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))

#define mk_pmd(page, prot)	pte_pmd(mk_pte(page, prot))

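/*
 * _PAGE_HW_SZ marks a super page TLB entry; a PMD with it set maps a
 * huge page, which is what pmd_trans_huge() tests (pmd_modify() below
 * takes care to retain the bit for the same reason).
 */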
#define pmd_trans_huge(pmd)	(pmd_val(pmd) & _PAGE_HW_SZ)

#define pfn_pmd(pfn, prot)	(__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	/*
	 * An open-coded pte_modify() that additionally retains the HW_SZ
	 * bit, so that pmd_trans_huge() remains true for this PMD
	 */
	return __pmd((pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HW_SZ)) | pgprot_val(newprot));
}

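/*
 * ARC has no hardware page table walker (TLB refill is done by a
 * software exception handler), so installing a huge PMD is a plain
 * store with no extra ordering requirements.
 */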
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

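/*
 * PMD counterpart of update_mmu_cache(): gives the arch a chance to
 * preload the TLB with the just-established huge mapping.
 */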
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);

/* Generic variants assume pgtable_t is struct page *, hence the need for these */
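/*
 * The page table deposited when a huge PMD is created is handed back
 * when that PMD is split, so the split path never has to allocate (and
 * hence cannot fail) at that point.
 */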
#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

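/*
 * The generic flush_pmd_tlb_range() is built on flush_tlb_range(),
 * which deals in normal-sized pages; ARC overrides it (in
 * arch/arc/mm/tlb.c) so that the super page TLB entry can be shot
 * down as a single unit.
 */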
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
				unsigned long end);

/* We don't have hardware dirty/accessed bits, so generic_pmdp_establish() is fine. */
#define pmdp_establish generic_pmdp_establish

#endif /* _ASM_ARC_HUGEPAGE_H */