// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in linux/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting the entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
        pgd_ERROR(*pgd);
        pgd_clear(pgd);
}
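
/*
 * For reference, the usual entry point is pgd_none_or_clear_bad() in
 * <linux/pgtable.h>, which looks roughly like this (reproduced as a
 * sketch, not compiled here):
 *
 *      static inline int pgd_none_or_clear_bad(pgd_t *pgd)
 *      {
 *              if (pgd_none(*pgd))
 *                      return 1;
 *              if (unlikely(pgd_bad(*pgd))) {
 *                      pgd_clear_bad(pgd);
 *                      return 1;
 *              }
 *              return 0;
 *      }
 */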

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
        p4d_ERROR(*p4d);
        p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
        pud_ERROR(*pud);
        pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stubbed out just as the p4d/pud
 * variants above can: pmd folding is special, and the pmd_* macros typically
 * refer to the upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
        pmd_ERROR(*pmd);
        pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache().  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);
        if (changed) {
                set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_fix_spurious_fault(vma, address);
        }
        return changed;
}
#endif
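
/*
 * Illustrative sketch, not built: how a fault handler is expected to
 * consume the return value of ptep_set_access_flags(). The helper name
 * and shape below are hypothetical; see handle_pte_fault() in
 * mm/memory.c for the real caller.
 */
#if 0
static void example_fixup_access(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int write)
{
        /* only update the MMU cache when the PTE actually changed */
        if (ptep_set_access_flags(vma, address, ptep, entry, write))
                update_mmu_cache(vma, address, ptep);
}
#endif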

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        int young;
        young = ptep_test_and_clear_young(vma, address, ptep);
        if (young)
                flush_tlb_page(vma, address);
        return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
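/*
 * Clear a present pte and flush any stale TLB entry for it. The flush
 * is skipped when pte_accessible() reports that the old pte could
 * never have been cached by the TLB in the first place.
 */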
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
                       pte_t *ptep)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t pte;
        pte = ptep_get_and_clear(mm, address, ptep);
        if (pte_accessible(mm, pte))
                flush_tlb_page(vma, address);
        return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        int changed = !pmd_same(*pmdp, entry);
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        if (changed) {
                set_pmd_at(vma->vm_mm, address, pmdp, entry);
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp)
{
        pmd_t pmd;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
                  !pmd_devmap(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
                            pud_t *pudp)
{
        pud_t pud;

        VM_BUG_ON(address & ~HPAGE_PUD_MASK);
        VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
        pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
        flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
        return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(&pgtable->lru);
        else
                list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
        pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pgtable_t pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
                                                          struct page, lru);
        if (pmd_huge_pte(mm, pmdp))
                list_del(&pgtable->lru);
        return pgtable;
}
#endif
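
/*
 * Illustrative sketch, not built: the deposit/withdraw pair above
 * stashes a preallocated pte page under the pmd lock when a huge pmd
 * is installed, so that a later split cannot fail for lack of memory.
 * The helper below is hypothetical; see the THP code in
 * mm/huge_memory.c for the real callers.
 */
#if 0
static void example_install_huge_pmd(struct mm_struct *mm, unsigned long haddr,
                                     pmd_t *pmdp, pmd_t huge_entry,
                                     pgtable_t pgtable)
{
        spinlock_t *ptl = pmd_lock(mm, pmdp);

        pgtable_trans_huge_deposit(mm, pmdp, pgtable);  /* stash for split */
        set_pmd_at(mm, haddr, pmdp, huge_entry);
        spin_unlock(ptl);
}
#endif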

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
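/*
 * Temporarily mark the huge pmd non-present and flush it, returning the
 * old value. Used, for example, while splitting a huge pmd, so that
 * concurrent hardware walkers never see a half-updated entry.
 */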
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                      pmd_t *pmdp)
{
        pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return old;
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
{
        /*
         * pmd and hugepage pte formats are the same, so we can
         * use the same function.
         */
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

        /* collapse entails shooting down ptes, not the pmd */
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}
#endif
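
/*
 * Illustrative sketch, not built: khugepaged's collapse path is the
 * expected caller of pmdp_collapse_flush(). The skeleton below is
 * hypothetical and heavily simplified; see mm/khugepaged.c for the
 * real thing.
 */
#if 0
static pmd_t example_collapse_step(struct vm_area_struct *vma,
                                   unsigned long haddr, pmd_t *pmdp)
{
        pmd_t pmd;

        /* haddr is HPAGE_PMD_SIZE aligned; *pmdp still points at a pte table */
        pmd = pmdp_collapse_flush(vma, haddr, pmdp);
        /* ... migrate the old ptes into a huge page, then install it ... */
        return pmd;
}
#endif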
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */