/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>	/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32   ("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};
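
/*
 * For illustration, a mount that exercises these parameters (the mount
 * point and values are arbitrary; "size" and "min_size" may also be given
 * as a percentage of the huge page pool, e.g. size=50%):
 *
 *	mount -t hugetlbfs -o pagesize=2M,size=1G,min_size=512M,\
 *	      nr_inodes=64,uid=1000,gid=1000,mode=1770 none /mnt/huge
 */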
87
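/*
 * The vma handed to these helpers is the pseudo vma set up by
 * hugetlbfs_fallocate() below, which has no real vma to give the huge
 * page allocator: the shared policy is looked up by file index so that
 * preallocation follows the same NUMA placement a fault at that offset
 * would use.
 */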
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif
110
static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
130
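/*
 * Worked example, assuming 4K base pages on a 64-bit arch (PAGE_SHIFT == 12,
 * BITS_PER_LONG == 64): PGOFF_LOFFT_MAX is ((1UL << 13) - 1) << 51, i.e.
 * 0xfff8000000000000.  A vm_pgoff with any of those top 13 bits set would,
 * once shifted left by PAGE_SHIFT into a byte offset, spill past the 63
 * value bits of the signed loff_t, so such offsets must be rejected.
 */
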
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (!hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}
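
/*
 * A minimal userspace sketch of mapping a hugetlbfs file, assuming a 2MB
 * huge page size (the path is illustrative and error handling is omitted).
 * Both the length and the file offset must be multiples of the huge page
 * size, per the alignment checks in this file:
 *
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0644);
 *	void *p = mmap(NULL, 4UL << 21, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 */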

/*
 * Called under mmap_write_lock(mm).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use mm->get_unmapped_area value as a hint to use topdown routine.
	 * If architectures have special needs, they should define their own
	 * version of hugetlb_get_unmapped_area.
	 */
	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}
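
/*
 * Example: with a 2MB huge page (512 base pages of 4K) and offset == 5000,
 * copying starts at base page i == 1 (5000 >> 12) with an in-page offset
 * of 904 (5000 & ~PAGE_MASK), then continues one PAGE_SIZE chunk at a time
 * until 'size' bytes have been pushed to the iterator.
 */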

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to generic_file_buffered_read(); we can't use
 * that since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}
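
/*
 * Example, with 4K base pages: for a vma with vm_pgoff == 0x200 and a
 * truncation point of start == 0x300, v_offset is (0x300 - 0x200) << 12,
 * so unmapping begins 1MB into the vma; a vma that begins at or after the
 * truncation point (vm_pgoff >= start) is unmapped from vm_start onward.
 */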

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() holds i_mmap_rwsem and prevents
 *	page faults in the truncated range by checking i_size.  i_size is
 *	modified while holding i_mmap_rwsem.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  The region/reserve map for ranges without associated
 *	pages is not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash = 0;

			index = page->index;
			if (!truncate_op) {
				/*
				 * Only need to hold the fault mutex in the
				 * hole punch case.  This prevents races with
				 * page faults.  Races are not possible in the
				 * case of truncation.
				 */
				hash = hugetlb_fault_mutex_hash(mapping, index);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
			}

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
				i_mmap_lock_write(mapping);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(HPageRestoreReserve(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			if (!truncate_op)
				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_mmap_lock_write(mapping);
	i_size_write(inode, offset);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_rwsem */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}
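
/*
 * Example, assuming 2MB huge pages: punching offset == 1MB, len == 4MB
 * covers [1MB, 5MB) and rounds to hole_start == 2MB, hole_end == 4MB, so
 * exactly one huge page is removed; the partially covered pages at either
 * end are left in place.
 */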

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/*
		 * fault mutex taken here, protects against fault path
		 * and hole punch.  inode_lock previously taken protects
		 * against truncation.
		 */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/*
		 * Allocate page without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma.  However, there could be shared mappings with
		 * reserves for the file at the inode level.  If we fallocate
		 * pages in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		page = alloc_huge_page(&pseudo_vma, addr, 0);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, page);
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		SetHPageMigratable(page);
		/*
		 * unlock_page because the page was locked by
		 * add_to_page_cache(); put_page to drop the reference
		 * taken by alloc_huge_page().
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}
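
/*
 * Typical userspace usage, as a sketch assuming a 2MB huge page size
 * (illustrative path, no error handling).  Note that the VFS requires
 * FALLOC_FL_PUNCH_HOLE to be combined with FALLOC_FL_KEEP_SIZE before
 * this handler is ever reached:
 *
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0644);
 *	fallocate(fd, 0, 0, 8UL << 21);			// preallocate 8 pages
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  2UL << 21, 1UL << 21);		// free one page back
 */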

static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(&init_user_ns, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(&init_user_ns, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation.  Allocate an inode, and we're done.
 */
static int do_hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry,
			umode_t mode,
			dev_t dev,
			bool tmpfile)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		if (tmpfile) {
			d_tmpfile(dentry, inode);
		} else {
			d_instantiate(dentry, inode);
			dget(dentry);	/* Extra count - pin the dentry in core */
		}
		error = 0;
	}
	return error;
}

static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
}

static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct user_namespace *mnt_userns,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     umode_t mode)
{
	return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
}

static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (hugetlb_page_subpool(page)) {
		hugetlb_set_page_subpool(newpage, hugetlb_page_subpool(page));
		hugetlb_set_page_subpool(page, NULL);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

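/*
 * Invoked via the address_space error_remove_page hook, i.e. from the
 * memory-failure path for a poisoned page cache page: drop the page and,
 * as in remove_inode_hugepages(), repair the reserve counts if
 * hugetlb_unreserve_pages() fails.
 */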
static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}
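
/*
 * For the example mount shown above hugetlb_fs_parameters, the resulting
 * /proc/mounts line would look roughly like the following (the generic
 * flags such as rw,relatime depend on the mount; sizes come out in bytes,
 * wrapped here for readability):
 *
 *	none /mnt/huge hugetlbfs rw,relatime,uid=1000,gid=1000,mode=1770,
 *	nr_inodes=64,pagesize=2M,size=1073741824,min_size=536870912 0 0
 */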

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.set_page_dirty		= __set_page_dirty_no_writeback,
	.migratepage		= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
	.tmpfile	= hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.free_inode	= hugetlbfs_free_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}
1217
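/*
 * Worked example (illustrative numbers, not taken from this file): on a
 * system with 2MB huge pages (huge_page_shift == 21) and a pool of 512
 * pages, the option "size=50%" arrives here as size_opt == 50 with
 * SIZE_PERCENT, and is converted as:
 *
 *	size_opt <<= 21;	-> 50 << 21   == 104857600
 *	size_opt *= 512;	->            == 53687091200
 *	do_div(size_opt, 100);	->            == 536870912   (512MB)
 *	size_opt >>= 21;	->            == 256 huge pages
 *
 * i.e. half of the 512-page pool, as expected.
 */
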
/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_val;
		return 0;

	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_val;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		ctx->hstate = size_to_hstate(ps);
		if (!ctx->hstate) {
			pr_err("Unsupported page size %lu MB\n", ps >> 20);
			return -EINVAL;
		}
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}

bad_val:
	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
		       param->string, param->key);
}

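/*
 * As an illustration (option values here are examples, not defaults), a
 * mount such as
 *
 *	mount -t hugetlbfs -o size=512M,min_size=25%,pagesize=2M,mode=1770 \
 *		none /mnt/huge
 *
 * invokes hugetlbfs_parse_param() once per option: Opt_size stores 512M as
 * max_size_opt with SIZE_STD, Opt_min_size stores 25 with SIZE_PERCENT,
 * Opt_pagesize selects the 2MB hstate, and Opt_mode masks the mode to
 * 01770.  The size values are only converted to huge-page counts later,
 * in hugetlbfs_validate().
 */
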
/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must not exceed it.
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size cannot be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}

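/*
 * For example (hypothetical option values), "size=256M,min_size=512M" on a
 * 2MB-page mount converts to max_hpages == 128 and min_hpages == 256, so
 * the check above rejects the mount with -EINVAL before any subpool is
 * created.
 */
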
static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate		= ctx->hstate;
	sbinfo->max_inodes	= ctx->nr_inodes;
	sbinfo->free_inodes	= ctx->nr_inodes;
	sbinfo->spool		= NULL;
	sbinfo->uid		= ctx->uid;
	sbinfo->gid		= ctx->gid;
	sbinfo->mode		= ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;

	/*
	 * Due to the special and limited functionality of hugetlbfs, it does
	 * not work well as a stacking filesystem.
	 */
	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages	= -1; /* No limit on size by default */
	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
	ctx->uid	= current_fsuid();
	ctx->gid	= current_fsgid();
	ctx->mode	= 0755;
	ctx->hstate	= &default_hstate;
	ctx->min_hpages	= -1; /* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops = &hugetlbfs_fs_context_ops;
	return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return hstate_index(h);
}

/*
 * Note that size should be aligned to the proper hugepage size on the
 * caller's side; otherwise hugetlb_reserve_pages() reserves one fewer huge
 * page than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct ucounts **ucounts,
				int creat_flags, int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*ucounts = NULL;
	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*ucounts = current_ucounts();
		if (user_shm_lock(size, *ucounts)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*ucounts = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (!hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	if (*ucounts) {
		user_shm_unlock(size, *ucounts);
		*ucounts = NULL;
	}
	return file;
}

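/*
 * Illustrative userspace view (not part of this file): the usual path into
 * hugetlb_file_setup() is SysV shared memory with SHM_HUGETLB, e.g.
 *
 *	int id = shmget(IPC_PRIVATE, 2 * 1024 * 1024,
 *			SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
 *
 * ipc/shm.c then calls this function with creat_flags ==
 * HUGETLB_SHMFS_INODE, so the caller needs CAP_IPC_LOCK, membership in
 * sysctl_hugetlb_shm_group, or a sufficient mlock ulimit (deprecated, see
 * the warning above).  mmap(MAP_HUGETLB) reaches here as well, with
 * HUGETLB_ANONHUGE_INODE, which skips that permission check.
 */
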
static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %luK\n",
		       huge_page_size(h) >> 10);
	return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&default_hstate);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

 out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
 out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)