#ifndef _ASM_POWERPC_PTE_WALK_H
#define _ASM_POWERPC_PTE_WALK_H

#include <linux/sched.h>
/* Don't use this directly */
extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			       bool *is_thp, unsigned *hshift);

10static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
11 bool *is_thp, unsigned *hshift)
12{
Aneesh Kumar K.Vd6eaced2019-05-14 11:33:00 +053013 pte_t *pte;
14
Aneesh Kumar K.V94171b12017-07-27 11:54:53 +053015 VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
Aneesh Kumar K.Vd6eaced2019-05-14 11:33:00 +053016 pte = __find_linux_pte(pgdir, ea, is_thp, hshift);
17
18#if defined(CONFIG_DEBUG_VM) && \
19 !(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
20 /*
21 * We should not find huge page if these configs are not enabled.
22 */
23 if (hshift)
24 WARN_ON(*hshift);
25#endif
26 return pte;
Aneesh Kumar K.V94171b12017-07-27 11:54:53 +053027}
28
29static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
30{
31 pgd_t *pgdir = init_mm.pgd;
32 return __find_linux_pte(pgdir, ea, NULL, hshift);
33}
Nicholas Piggin5362a4b2021-05-26 22:00:05 +100034
35/*
36 * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
37 * physical address, without taking locks. This can be used in real-mode.
38 */
39static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
40{
41 pte_t *ptep;
42 phys_addr_t pa;
43 int hugepage_shift;
44
45 /*
46 * init_mm does not free page tables, and does not do THP. It may
47 * have huge pages from huge vmalloc / ioremap etc.
48 */
49 ptep = find_init_mm_pte(addr, &hugepage_shift);
50 if (WARN_ON(!ptep))
51 return 0;
52
53 pa = PFN_PHYS(pte_pfn(*ptep));
54
55 if (!hugepage_shift)
56 hugepage_shift = PAGE_SHIFT;
57
58 pa |= addr & ((1ul << hugepage_shift) - 1);
59
60 return pa;
61}
62
Aneesh Kumar K.V94171b12017-07-27 11:54:53 +053063/*
64 * This is what we should always use. Any other lockless page table lookup needs
65 * careful audit against THP split.
66 */
67static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
68 bool *is_thp, unsigned *hshift)
69{
Aneesh Kumar K.Vd6eaced2019-05-14 11:33:00 +053070 pte_t *pte;
71
Aneesh Kumar K.V94171b12017-07-27 11:54:53 +053072 VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
73 VM_WARN(pgdir != current->mm->pgd,
74 "%s lock less page table lookup called on wrong mm\n", __func__);
Aneesh Kumar K.Vd6eaced2019-05-14 11:33:00 +053075 pte = __find_linux_pte(pgdir, ea, is_thp, hshift);
76
77#if defined(CONFIG_DEBUG_VM) && \
78 !(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
79 /*
80 * We should not find huge page if these configs are not enabled.
81 */
82 if (hshift)
83 WARN_ON(*hshift);
84#endif
85 return pte;
Aneesh Kumar K.V94171b12017-07-27 11:54:53 +053086}

#endif /* _ASM_POWERPC_PTE_WALK_H */