/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>	/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32   ("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};
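
/*
 * For reference (illustrative example, not part of the original file):
 * the parameter table above corresponds to mount options such as
 *
 *	mount -t hugetlbfs -o uid=1000,gid=1000,mode=1770,pagesize=2M,size=1G,min_size=512M,nr_inodes=64 none /mnt/huge
 *
 * "size" and "min_size" may also be given as a percentage of the huge
 * page pool, e.g. size=50%.
 */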

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))
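
/*
 * Worked example (illustrative): with PAGE_SHIFT == 12 and
 * BITS_PER_LONG == 64 this is ((1UL << 13) - 1) << 51, i.e. the top
 * 13 bits of the page offset.  If any of them is set, pgoff <<
 * PAGE_SHIFT would spill into (or past) the sign bit of a 64-bit
 * loff_t.
 */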

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
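	/*
	 * For instance (illustrative numbers): with 2 MB huge pages,
	 * ~huge_page_mask(h) >> PAGE_SHIFT is 0x1ff, so vm_pgoff must
	 * be a multiple of 512 base pages.
	 */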
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
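	/*
	 * e.g. (illustrative): for 2 MB huge pages the mask above is
	 * 0x1ff000, so vm_unmapped_area() returns a 2 MB aligned address.
	 */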
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;
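	/* e.g. offset 0x3400 with 4 KB base pages: i = 3, offset = 0x400 */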

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
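	/* e.g. with 2 MB huge pages, ki_pos = 5 MB: index = 2, offset = 1 MB */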
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

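		/*
		 * Illustration (editor's example): truncating at
		 * start = 1024 a vma with vm_pgoff = 512 gives
		 * v_offset = 512 << PAGE_SHIFT, so unmapping begins
		 * 2 MB into the vma (4 KB base pages).
		 */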
		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() holds i_mmap_rwsem and prevents
 *	page faults in the truncated range by checking i_size.  i_size is
 *	modified while holding i_mmap_rwsem.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv map for ranges without associated
 *	pages is not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	vma_init(&pseudo_vma, current->mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(mapping, index);
			if (!truncate_op) {
				/*
				 * Only need to hold the fault mutex in the
				 * hole punch case.  This prevents races with
				 * page faults.  Races are not possible in the
				 * case of truncation.
				 */
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
			}

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
				i_mmap_lock_write(mapping);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count may need
			 * to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			if (!truncate_op)
				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_mmap_lock_write(mapping);
	i_size_write(inode, offset);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch, round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);
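	/*
	 * Example (illustrative): with 2 MB huge pages, offset = 1 MB and
	 * len = 6 MB give hole_start = 2 MB and hole_end = 6 MB, so only
	 * the two fully covered huge pages are punched.
	 */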

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_mutex */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;
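	/*
	 * Example (illustrative): with 2 MB huge pages, offset = 3 MB and
	 * len = 3 MB give start = 1 and end = 3, i.e. huge pages 1 and 2
	 * are covered; preallocation rounds outward, the opposite of
	 * hole punch.
	 */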

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/*
		 * The fault mutex taken here protects against the fault
		 * path and hole punch.  The inode_lock previously taken
		 * protects against truncation.
		 */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * unlock_page because the page was locked by
		 * add_to_page_cache(); put_page to drop the reference
		 * taken by alloc_huge_page().
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		error = hugetlb_vmtruncate(inode, newsize);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done.
 */
static int do_hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry,
			umode_t mode,
			dev_t dev,
			bool tmpfile)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		if (tmpfile) {
			d_tmpfile(dentry, inode);
		} else {
			d_instantiate(dentry, inode);
			dget(dentry);	/* Extra count - pin the dentry in core */
		}
		error = 0;
	}
	return error;
}

static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct inode *dir,
			struct dentry *dentry, umode_t mode)
{
	return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * page_private is subpool pointer in hugetlb pages.  Transfer to
	 * new page.  PagePrivate is not associated with page_private for
	 * hugetlb pages and cannot be set here as only page_huge_active
	 * pages can be migrated.
	 */
	if (page_private(page)) {
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.set_page_dirty		= hugetlbfs_set_page_dirty,
	.migratepage		= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};

static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
	.tmpfile	= hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.free_inode	= hugetlbfs_free_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}
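	/*
	 * Illustration: size=50% with a pool of 512 huge pages becomes
	 * (50 << shift) * 512 / 100 here, and the shift back below
	 * yields 256 pages.
	 */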

	size_opt >>= huge_page_shift(h);
	return size_opt;
}

/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_val;
		return 0;

	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_val;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		ctx->hstate = size_to_hstate(ps);
		if (!ctx->hstate) {
			pr_err("Unsupported page size %lu MB\n", ps >> 20);
			return -EINVAL;
		}
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}
Andi Kleena137e1c2008-07-23 21:27:43 -07001243
David Howells32021982018-11-01 23:07:26 +00001244bad_val:
Al Virob5db30c2019-12-21 21:34:06 -05001245 return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
David Howells32021982018-11-01 23:07:26 +00001246 param->string, param->key);
1247}
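
/*
 * Example (illustrative): the parameters handled above correspond to
 * mount options such as
 *
 *	mount -t hugetlbfs \
 *	    -o uid=1000,gid=1000,mode=0700,pagesize=2M,size=1G,min_size=512M,nr_inodes=64 \
 *	    none /mnt/huge
 *
 * assuming the system has a 2 MB hstate configured; "size" and
 * "min_size" also accept percentages of the pool, e.g. size=50%.
 */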

/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must not be larger.
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}
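
/*
 * Example (illustrative): for any non-empty pool, "size=25%,min_size=50%"
 * converts to min_hpages > max_hpages, so the check above rejects the
 * mount with -EINVAL.  "min_size=512M" alone is accepted, because with
 * no maximum given max_val_type stays NO_SIZE and the check is skipped.
 */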

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate		= ctx->hstate;
	sbinfo->max_inodes	= ctx->nr_inodes;
	sbinfo->free_inodes	= ctx->nr_inodes;
	sbinfo->spool		= NULL;
	sbinfo->uid		= ctx->uid;
	sbinfo->gid		= ctx->gid;
	sbinfo->mode		= ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}
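
/*
 * Example (illustrative): mounting with "pagesize=2M,size=512M" reaches
 * here with ctx->max_hpages == 256 (512M >> 21), so a subpool capped at
 * 256 huge pages is attached to the superblock and s_blocksize becomes
 * 2 MiB.
 */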

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages	= -1;	/* No limit on size by default */
	ctx->nr_inodes	= -1;	/* No limit on number of inodes by default */
	ctx->uid	= current_fsuid();
	ctx->gid	= current_fsgid();
	ctx->mode	= 0755;
	ctx->hstate	= &default_hstate;
	ctx->min_hpages	= -1;	/* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops = &hugetlbfs_fs_context_ops;
	return 0;
}
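
/*
 * Illustrative userspace sketch (not part of this file): the fs_context
 * operations above are what the new mount API syscalls end up invoking.
 * Assuming a kernel providing fsopen(2)/fsconfig(2)/fsmount(2) and raw
 * syscall numbers from <sys/syscall.h>:
 *
 *	int fsfd = syscall(SYS_fsopen, "hugetlbfs", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "1G", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "pagesize", "2M", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
 *
 * Each FSCONFIG_SET_STRING lands in hugetlbfs_parse_param(), and
 * FSCONFIG_CMD_CREATE drives hugetlbfs_get_tree() above.
 */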

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}
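
/*
 * Example (illustrative): an administrator can allow an unprivileged
 * group to create SHM_HUGETLB segments via the sysctl checked above,
 * e.g. for a hypothetical "hugeusers" group:
 *
 *	# sysctl vm.hugetlb_shm_group=$(getent group hugeusers | cut -d: -f3)
 *
 * Members of that group then pass can_do_hugetlb_shm() without needing
 * CAP_IPC_LOCK.
 */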

/*
 * Map a log2 page size to an index into hstates[]; a page_size_log of 0
 * selects the default hstate (see hstate_sizelog()).
 */
static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

/*
 * Note that size should be aligned to the proper hugepage size by the
 * caller; otherwise hugetlb_reserve_pages reserves one less hugepage
 * than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}
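
/*
 * Illustrative caller sketch (not from this file): the SysV SHM code in
 * ipc/shm.c is the main user of hugetlb_file_setup(), reached from
 * userspace via e.g.
 *
 *	int id = shmget(IPC_PRIVATE, 8 * 2048 * 1024,
 *			SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
 *	void *p = shmat(id, NULL, 0);
 *
 * assuming 2 MB huge pages; note the size is a multiple of the huge
 * page size, per the alignment comment above.
 */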

static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %uK",
		       1U << (h->order + PAGE_SHIFT - 10));
	return mnt;
}
1475
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476static int __init init_hugetlbfs_fs(void)
1477{
David Howells32021982018-11-01 23:07:26 +00001478 struct vfsmount *mnt;
Andi Kleen42d73952012-12-11 16:01:34 -08001479 struct hstate *h;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 int error;
Andi Kleen42d73952012-12-11 16:01:34 -08001481 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482
Nishanth Aravamudan457c1b22014-05-06 12:50:00 -07001483 if (!hugepages_supported()) {
Andrew Morton9b857d22014-06-04 16:07:21 -07001484 pr_info("disabling because there are no supported hugepage sizes\n");
Nishanth Aravamudan457c1b22014-05-06 12:50:00 -07001485 return -ENOTSUPP;
1486 }
1487
Hillf Dantond1d5e05ff2012-03-21 16:34:15 -07001488 error = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
1490 sizeof(struct hugetlbfs_inode_info),
Vladimir Davydov5d097052016-01-14 15:18:21 -08001491 0, SLAB_ACCOUNT, init_once);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 if (hugetlbfs_inode_cachep == NULL)
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001493 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494
1495 error = register_filesystem(&hugetlbfs_fs_type);
1496 if (error)
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001497 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001499 /* default hstate mount is required */
1500 mnt = mount_one_hugetlbfs(&hstates[default_hstate_idx]);
1501 if (IS_ERR(mnt)) {
1502 error = PTR_ERR(mnt);
1503 goto out_unreg;
1504 }
1505 hugetlbfs_vfsmount[default_hstate_idx] = mnt;
1506
1507 /* other hstates are optional */
Andi Kleen42d73952012-12-11 16:01:34 -08001508 i = 0;
1509 for_each_hstate(h) {
Jan Stancek15f0ec92020-01-03 18:37:18 +01001510 if (i == default_hstate_idx) {
1511 i++;
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001512 continue;
Jan Stancek15f0ec92020-01-03 18:37:18 +01001513 }
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001514
David Howells32021982018-11-01 23:07:26 +00001515 mnt = mount_one_hugetlbfs(h);
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001516 if (IS_ERR(mnt))
1517 hugetlbfs_vfsmount[i] = NULL;
1518 else
1519 hugetlbfs_vfsmount[i] = mnt;
Andi Kleen42d73952012-12-11 16:01:34 -08001520 i++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 }
David Howells32021982018-11-01 23:07:26 +00001522
1523 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001525 out_unreg:
1526 (void)unregister_filesystem(&hugetlbfs_fs_type);
1527 out_free:
Hillf Dantond1d5e05ff2012-03-21 16:34:15 -07001528 kmem_cache_destroy(hugetlbfs_inode_cachep);
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001529 out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 return error;
1531}
Paul Gortmaker3e89e1c2016-01-14 15:21:52 -08001532fs_initcall(init_hugetlbfs_fs)