#ifndef _ASM_POWERPC_PTE_WALK_H
#define _ASM_POWERPC_PTE_WALK_H

#include <linux/sched.h>

/* Don't use this directly */
extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			       bool *is_thp, unsigned *hshift);

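/*
 * Lockless page table walk.  Interrupts must stay disabled across the walk
 * and across any use of the returned PTE pointer; that is what keeps the
 * walk safe against a concurrent THP split or page table teardown.  For a
 * huge mapping, *hshift is set to the page size shift (0 otherwise), and
 * *is_thp reports whether the entry is a transparent huge page.
 */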
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
				    bool *is_thp, unsigned *hshift)
{
	pte_t *pte;

	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
	pte = __find_linux_pte(pgdir, ea, is_thp, hshift);

#if defined(CONFIG_DEBUG_VM) && \
	!(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
	/*
	 * We should not find a huge page if these configs are not enabled.
	 */
	if (hshift)
		WARN_ON(*hshift);
#endif
	return pte;
}

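/*
 * Walk the kernel (init_mm) page tables.  There is no THP in the kernel
 * page tables, so no is_thp argument is needed; huge mappings (e.g. from
 * huge vmalloc or ioremap) are still reported through *hshift.
 */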
static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
{
	pgd_t *pgdir = init_mm.pgd;
	return __find_linux_pte(pgdir, ea, NULL, hshift);
}

/*
 * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
 * physical address, without taking locks.  This can be used in real mode.
 */
static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
{
	pte_t *ptep;
	phys_addr_t pa;
	int hugepage_shift;

	/*
	 * init_mm does not free page tables, and does not do THP.  It may
	 * have huge pages from huge vmalloc / ioremap etc.
	 */
	ptep = find_init_mm_pte(addr, &hugepage_shift);
	if (WARN_ON(!ptep))
		return 0;

	pa = PFN_PHYS(pte_pfn(*ptep));

	if (!hugepage_shift)
		hugepage_shift = PAGE_SHIFT;

	pa |= addr & ((1ul << hugepage_shift) - 1);

	return pa;
}
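
/*
 * Illustrative use only (a sketch, not taken from this file): translating
 * a vmalloc'ed buffer address so the buffer can be reached with the MMU
 * off, e.g. from real-mode code.
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *	phys_addr_t pa = ppc_find_vmap_phys((unsigned long)buf);
 */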

/*
 * This is what we should always use.  Any other lockless page table lookup
 * needs careful audit against THP split.
 */
static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
					 bool *is_thp, unsigned *hshift)
{
	pte_t *pte;

	VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
	VM_WARN(pgdir != current->mm->pgd,
		"%s lockless page table lookup called on wrong mm\n", __func__);
	pte = __find_linux_pte(pgdir, ea, is_thp, hshift);

#if defined(CONFIG_DEBUG_VM) && \
	!(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
	/*
	 * We should not find a huge page if these configs are not enabled.
	 */
	if (hshift)
		WARN_ON(*hshift);
#endif
	return pte;
}
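
/*
 * Illustrative calling pattern (a sketch, not taken from a real caller):
 *
 *	unsigned long flags;
 *	unsigned int hshift;
 *	bool is_thp;
 *	pte_t *ptep;
 *
 *	local_irq_save(flags);
 *	ptep = find_current_mm_pte(current->mm->pgd, ea, &is_thp, &hshift);
 *	if (ptep) {
 *		... consume *ptep, is_thp and hshift while irqs are still off ...
 *	}
 *	local_irq_restore(flags);
 */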

#endif /* _ASM_POWERPC_PTE_WALK_H */