// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

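/* Finish the walk and tell the caller that no (further) mapping was found. */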
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

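/*
 * Map the pte for pvmw->address and take its page table lock.  Without
 * PVMW_SYNC, peek at the unlocked entry first so that entries which cannot
 * be of interest (non-swap entries during a migration walk; non-present,
 * non-device entries otherwise) are skipped without taking the lock.
 */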
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such a
			 * page is not CPU accessible and thus is mapped as
			 * a special swap entry; nonetheless it still counts
			 * as a valid regular mapping for the page (and is
			 * accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry) &&
				    !is_device_exclusive_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

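/*
 * Does @pfn fall within @page?  For a normal or hugetlbfs page this is a
 * plain head-pfn comparison; for a THP, any of its subpage pfns matches.
 */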
static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
	unsigned long page_pfn = page_to_pfn(page);

	/* normal page and hugetlbfs page */
	if (!PageTransCompound(page) || PageHuge(page))
		return page_pfn == pfn;

	/* THP can be referenced by any subpage */
	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
}

/**
 * check_pte - check if @pvmw->page is mapped at @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, carrying the pte and page to check
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to
 * an arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page, or to any subpage in case of
 * THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * to @pvmw->page, or to any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_is_match(pvmw->page, pfn);
}

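/*
 * Advance pvmw->address to the next @size-aligned boundary (@size must be a
 * power of two), saturating at ULONG_MAX on wraparound so that the caller's
 * "address < end" loop still terminates.
 */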
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}
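
/*
 * A minimal sketch of how rmap-style callers typically drive this walker
 * (illustrative only, not a verbatim caller from this file):
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		... here pvmw.pte (or pvmw.pmd, for a PMD-mapped THP) is
 *		... valid and the corresponding page table lock is held ...
 *	}
 *
 * Breaking out of the loop early requires calling
 * page_vma_mapped_walk_done() to drop the lock and unmap the pte.
 */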

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually THP). For a PTE-mapped THP, you should run page_vma_mapped_walk()
 * in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	unsigned long end;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

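	/*
	 * A HugeTLB page is mapped by a single entry, whatever its level, so
	 * it needs no pmd/pte iteration: look the entry up, lock it, check it.
	 */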
	if (unlikely(PageHuge(page))) {
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	/*
	 * Seeking to the next pte only makes sense for THP.
	 * But more important than that optimization is filtering out
	 * any PageKsm page, whose page->index misleads vma_address()
	 * and vma_address_end() to disaster.
	 */
	end = PageTransCompound(page) ?
		vma_address_end(page, pvmw->vma) :
		pvmw->address + PAGE_SIZE;
	if (pvmw->pte)
		goto next_pte;
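	/*
	 * Walk the page tables top-down (pgd, p4d, pud, pmd) for the current
	 * address, skipping whole ranges whose upper-level entries are not
	 * present, until @end is reached.
	 */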
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = READ_ONCE(*pvmw->pmd);

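		/*
		 * A huge pmd, present or carrying a THP migration entry, maps
		 * the whole THP at pmd level: take the pmd lock and handle it
		 * here. If the THP was split in the meantime, fall through to
		 * the pte level below.
		 */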
		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (likely(pmd_trans_huge(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (pmd_page(pmde) != page)
					return not_found(pvmw);
				return true;
			}
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    pfn_swap_entry_to_page(entry) != page)
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    PageTransCompound(page)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw))
			goto next_pte;
this_pte:
		if (check_pte(pvmw))
			return true;
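		/*
		 * Advance, page by page, to the next pte that could map the
		 * page: restart the page table walk when a pmd boundary is
		 * crossed, and give up once @end is reached.
		 */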
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
				spin_lock(pvmw->ptl);
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0 if it
 * is not. Only valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

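	/* vma_address() returns -EFAULT if @page cannot be mapped in @vma. */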
	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}