// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

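/* Unwind the walk state (ptl, pte) and report "no (more) matches". */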
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

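/*
 * map_pte - map the pte at @pvmw->address and take its page table lock.
 *
 * Unless PVMW_SYNC is set, first peek at the pte without the lock and
 * return false early when it cannot possibly match @pvmw->page: a
 * non-swap pte while looking for a migration entry, or a pte that is
 * neither present nor a device private swap entry otherwise.
 */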
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such a
			 * page is not CPU-accessible and is therefore mapped
			 * as a special swap entry; nonetheless it still counts
			 * as a valid regular mapping of the page (and is
			 * accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
	unsigned long page_pfn = page_to_pfn(page);

	/* normal page and hugetlbfs page */
	if (!PageTransCompound(page) || PageHuge(page))
		return page_pfn == pfn;

	/* THP can be referenced by any subpage */
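	/*
	 * e.g. a 512-subpage THP with its head at pfn 0x1000 matches
	 * any pfn in [0x1000, 0x11ff].
	 */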
	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
}

/**
 * check_pte - check if @pvmw->page is mapped at @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct; its pte and page are checked
 * against each other
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE, or a PTE pointing to
 * an arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page, or to any subpage in case
 * of THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * to @pvmw->page, or to any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_is_match(pvmw->page, pfn);
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(). It will do the housekeeping.
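 *
 * A sketch of the typical caller pattern (compare the walks in mm/rmap.c),
 * assuming @page, @vma and @address are already in scope::
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (pvmw.pte)
 *			...inspect the pte mapping at pvmw.pte...
 *		else
 *			...inspect the PMD-mapped THP at pvmw.pmd...
 *	}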
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on the last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			}
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else if (!pmd_present(pmde)) {
		/*
		 * If PVMW_SYNC, take and drop THP pmd lock so that we
		 * cannot return prematurely, while zap_huge_pmd() has
		 * cleared *pmd but not decremented compound_mapcount().
		 */
		if ((pvmw->flags & PVMW_SYNC) &&
		    PageTransCompound(pvmw->page)) {
			spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

			spin_unlock(ptl);
		}
		return false;
	}
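	/* The pmd maps a pte table: start with the pte at pvmw->address */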
	if (!map_pte(pvmw))
		goto next_pte;
	while (1) {
		unsigned long end;

		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seeking to the next pte only makes sense for a THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		end = vma_address_end(pvmw->page, pvmw->vma);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross a page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0 if
 * not. Only valid for normal file or anonymous VMAs.
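 *
 * A minimal usage sketch, assuming @page and @vma are already in hand::
 *
 *	if (page_mapped_in_vma(page, vma))
 *		...the page is currently mapped by this VMA...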
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}