// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;

	/*
	 * Hugepages under a user process are always in RAM and never
	 * swapped out, but theoretically they still need to be checked.
	 */
	present = pte && !huge_pte_none(huge_ptep_get(pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
#else
	BUG();
#endif
	return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (i.e. marked !present and faulted in with
	 * tmpfs's .fault). So swapped-out tmpfs mappings are tested here.
	 */
	page = find_get_incore_page(mapping, index);
	if (page) {
		present = PageUptodate(page);
		put_page(page);
	}

	return present;
}

static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   __always_unused int depth,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		/* A mapped huge pmd is resident, so the whole range is. */
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmd)) {
		__mincore_unmapped_range(addr, end, vma, vec);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		pte_t pte = *ptep;

		if (pte_none(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte))
			*vec = 1;
		else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    swp_offset(entry));
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to the files the calling process could (if it tried)
	 * open for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel: e.g. residency of a shared
	 * read-only file mapping would reveal other processes' accesses.
	 */
	return inode_owner_or_capable(&init_user_ns,
				      file_inode(vma->vm_file)) ||
		file_permission(vma->vm_file, MAY_WRITE) == 0;
}

static const struct mm_walk_ops mincore_walk_ops = {
	.pmd_entry = mincore_pte_range,
	.pte_hole = mincore_unmapped_range,
	.hugetlb_entry = mincore_hugetlb,
};

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap lock: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	if (!can_do_mincore(vma)) {
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
		memset(vec, 1, pages);
		return pages;
	}
	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes. The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information. Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len) are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
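/*
 * A minimal user-space sketch of a typical call (illustrative only and
 * not part of the kernel build; assumes glibc with <sys/mman.h>,
 * <unistd.h> and <stdio.h>, and omits error checking):
 *
 *	size_t page = sysconf(_SC_PAGESIZE);
 *	unsigned char vec[4];
 *	char *buf = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	buf[0] = 1;	// fault in the first page only
 *	if (mincore(buf, 4 * page, vec) == 0)
 *		printf("page 0 resident: %d\n", vec[0] & 1);
 *	munmap(buf, 4 * page);
 */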
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	start = untagged_addr(start);

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += offset_in_page(len) != 0;

	if (!access_ok(vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		mmap_read_lock(current->mm);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		mmap_read_unlock(current->mm);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}