/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>	/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_param_specs[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32   ("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};

static const struct fs_parameter_description hugetlb_fs_parameters = {
	.name		= "hugetlbfs",
	.specs		= hugetlb_param_specs,
};

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

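/*
 * Copy up to @size bytes from a huge page, starting at @offset, into an
 * iov_iter one base (PAGE_SIZE) chunk at a time.  Returns the number of
 * bytes copied; a short count means copy_page_to_iter() could not copy
 * a full chunk.
 */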
static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

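/*
 * Drop a huge page from the page cache: clear its dirty/uptodate state
 * and delete it from the mapping.  Callers hold the page lock.
 */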
static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

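/*
 * Unmap the range [start, end) of base page offsets from every VMA in
 * the i_mmap interval tree that overlaps it.  Callers hold i_mmap_rwsem
 * in write mode.
 */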
static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv map for ranges without associated
 *	pages is not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	vma_init(&pseudo_vma, current->mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(h, mapping, index, 0);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

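/*
 * Final inode teardown: release all remaining pages, then drop the
 * reserve map that was attached to the address space at creation time.
 */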
static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

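/*
 * Truncate the file to @offset, which must be huge page aligned: publish
 * the new i_size first, unmap all affected VMAs, then remove and
 * unreserve the pages beyond the new end of file.
 */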
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

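/*
 * Punch a hole in the file.  Partial huge pages cannot be freed, so the
 * requested range is shrunk to whole huge page boundaries (start rounded
 * up, end rounded down).  Write-sealed files refuse with -EPERM.
 */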
static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_mutex */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}

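/*
 * fallocate() for hugetlbfs.  The default mode preallocates huge pages
 * for the byte range by adding them to the page cache, so later faults
 * cannot fail for lack of huge pages.  A hypothetical user-space sketch
 * (path and sizes are illustrative only, assuming 2 MB huge pages):
 *
 *	int fd = open("/dev/hugepages/buf", O_CREAT | O_RDWR, 0600);
 *	if (fallocate(fd, 0, 0, 8 << 21))		// preallocate 8 pages
 *		err(1, "fallocate");
 *	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		      0, 1 << 21))			// free the first page
 *		err(1, "fallocate");
 */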
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(h, mapping, index, addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * unlock_page because locked by add_to_page_cache()
		 * put_page due to reference from alloc_huge_page()
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

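/*
 * setattr handler.  Only ATTR_SIZE needs hugetlbfs-specific handling:
 * the new size must be huge page aligned and must not violate the
 * F_SEAL_SHRINK/F_SEAL_GROW seals; the change is then applied via
 * hugetlb_vmtruncate().
 */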
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		error = hugetlb_vmtruncate(inode, newsize);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

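/*
 * Allocate the root directory inode for a new mount, using the uid, gid
 * and mode gathered from the mount options.
 */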
static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

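/*
 * Allocate a new inode.  Regular files and symlinks also get a reserve
 * map, hung off i_mapping->private_data, that tracks huge page
 * reservations for the lifetime of the inode.
 */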
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

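/*
 * Migrate a huge page to @newpage: move the page cache entry, then
 * transfer the subpool pointer stashed in page_private along with the
 * page contents and state.
 */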
static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * page_private is subpool pointer in hugetlb pages.  Transfer to
	 * new page.  PagePrivate is not associated with page_private for
	 * hugetlb pages and cannot be set here as only page_huge_active
	 * pages can be migrated.
	 */
	if (page_private(page)) {
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

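/*
 * Report filesystem statistics.  Counts are in units of the huge page
 * size (f_bsize) and are taken from the subpool limits when size limits
 * were given at mount time; otherwise zeroes are reported, as in
 * simple_statfs().
 */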
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

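/*
 * Allocate an in-core inode from the hugetlbfs slab cache, charging it
 * against the per-superblock inode limit when one is configured.
 */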
1031static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
1032{
Christoph Hellwig96527982005-10-29 18:16:42 -07001033 struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034 struct hugetlbfs_inode_info *p;
1035
Christoph Hellwig96527982005-10-29 18:16:42 -07001036 if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037 return NULL;
Christoph Lametere94b1762006-12-06 20:33:17 -08001038 p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
Christoph Hellwig96527982005-10-29 18:16:42 -07001039 if (unlikely(!p)) {
1040 hugetlbfs_inc_free_inodes(sbinfo);
1041 return NULL;
1042 }
Mike Kravetz4742a352017-03-31 15:12:01 -07001043
1044 /*
1045 * Any time after allocation, hugetlbfs_destroy_inode can be called
1046 * for the inode. mpol_free_shared_policy is unconditionally called
1047 * as part of hugetlbfs_destroy_inode. So, initialize policy here
1048 * in case of a quick call to destroy.
1049 *
1050 * Note that the policy is initialized even if we are creating a
1051 * private inode. This simplifies hugetlbfs_destroy_inode.
1052 */
1053 mpol_shared_policy_init(&p->policy, NULL);
1054
Linus Torvalds1da177e2005-04-16 15:20:36 -07001055 return &p->vfs_inode;
1056}
1057
Al Virob62de322019-04-15 23:16:38 -04001058static void hugetlbfs_free_inode(struct inode *inode)
Nick Pigginfa0d7e3d2011-01-07 17:49:49 +11001059{
Nick Pigginfa0d7e3d2011-01-07 17:49:49 +11001060 kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
1061}
1062
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063static void hugetlbfs_destroy_inode(struct inode *inode)
1064{
Christoph Hellwig96527982005-10-29 18:16:42 -07001065 hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001066 mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001067}
1068
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07001069static const struct address_space_operations hugetlbfs_aops = {
Nick Piggin800d15a2007-10-16 01:25:03 -07001070 .write_begin = hugetlbfs_write_begin,
1071 .write_end = hugetlbfs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001072 .set_page_dirty = hugetlbfs_set_page_dirty,
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001073 .migratepage = hugetlbfs_migrate_page,
Naoya Horiguchi78bb9202017-07-10 15:47:50 -07001074 .error_remove_page = hugetlbfs_error_remove_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075};
1076
Christoph Hellwig96527982005-10-29 18:16:42 -07001077
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07001078static void init_once(void *foo)
Christoph Hellwig96527982005-10-29 18:16:42 -07001079{
1080 struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
1081
Christoph Lametera35afb82007-05-16 22:10:57 -07001082 inode_init_once(&ei->vfs_inode);
Christoph Hellwig96527982005-10-29 18:16:42 -07001083}
1084
Arjan van de Ven4b6f5d22006-03-28 01:56:42 -08001085const struct file_operations hugetlbfs_file_operations = {
Al Viro34d06402015-04-03 11:31:35 -04001086 .read_iter = hugetlbfs_read_iter,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087 .mmap = hugetlbfs_file_mmap,
Christoph Hellwig1b061d92010-05-26 17:53:41 +02001088 .fsync = noop_fsync,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089 .get_unmapped_area = hugetlb_get_unmapped_area,
Mike Kravetz70c35472015-09-08 15:01:54 -07001090 .llseek = default_llseek,
1091 .fallocate = hugetlbfs_fallocate,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001092};
1093
Arjan van de Ven92e1d5b2007-02-12 00:55:39 -08001094static const struct inode_operations hugetlbfs_dir_inode_operations = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095 .create = hugetlbfs_create,
1096 .lookup = simple_lookup,
1097 .link = simple_link,
1098 .unlink = simple_unlink,
1099 .symlink = hugetlbfs_symlink,
1100 .mkdir = hugetlbfs_mkdir,
1101 .rmdir = simple_rmdir,
1102 .mknod = hugetlbfs_mknod,
1103 .rename = simple_rename,
1104 .setattr = hugetlbfs_setattr,
1105};
1106
Arjan van de Ven92e1d5b2007-02-12 00:55:39 -08001107static const struct inode_operations hugetlbfs_inode_operations = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001108 .setattr = hugetlbfs_setattr,
1109};
1110
Josef 'Jeff' Sipekee9b6d62007-02-12 00:55:41 -08001111static const struct super_operations hugetlbfs_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001112 .alloc_inode = hugetlbfs_alloc_inode,
Al Virob62de322019-04-15 23:16:38 -04001113 .free_inode = hugetlbfs_free_inode,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001114 .destroy_inode = hugetlbfs_destroy_inode,
Al Viro2bbbda32010-06-04 19:52:12 -04001115 .evict_inode = hugetlbfs_evict_inode,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001116 .statfs = hugetlbfs_statfs,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001117 .put_super = hugetlbfs_put_super,
David Howells4a252202017-07-05 16:24:18 +01001118 .show_options = hugetlbfs_show_options,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001119};
1120
Mike Kravetz7ca02d0a2015-04-15 16:13:42 -07001121/*
1122 * Convert size option passed from command line to number of huge pages
1123 * in the pool specified by hstate. Size option could be in bytes
1124 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
1125 */
David Howells4a252202017-07-05 16:24:18 +01001126static long
Mike Kravetz7ca02d0a2015-04-15 16:13:42 -07001127hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
David Howells4a252202017-07-05 16:24:18 +01001128 enum hugetlbfs_size_type val_type)
Mike Kravetz7ca02d0a2015-04-15 16:13:42 -07001129{
1130 if (val_type == NO_SIZE)
1131 return -1;
1132
1133 if (val_type == SIZE_PERCENT) {
1134 size_opt <<= huge_page_shift(h);
1135 size_opt *= h->max_huge_pages;
1136 do_div(size_opt, 100);
1137 }
1138
1139 size_opt >>= huge_page_shift(h);
1140 return size_opt;
1141}
1142
David Howells32021982018-11-01 23:07:26 +00001143/*
1144 * Parse one mount parameter.
1145 */
1146static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147{
David Howells32021982018-11-01 23:07:26 +00001148 struct hugetlbfs_fs_context *ctx = fc->fs_private;
1149 struct fs_parse_result result;
1150 char *rest;
1151 unsigned long ps;
1152 int opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153
David Howells32021982018-11-01 23:07:26 +00001154 opt = fs_parse(fc, &hugetlb_fs_parameters, param, &result);
1155 if (opt < 0)
1156 return opt;
1157
1158 switch (opt) {
1159 case Opt_uid:
1160 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
1161 if (!uid_valid(ctx->uid))
1162 goto bad_val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164
David Howells32021982018-11-01 23:07:26 +00001165 case Opt_gid:
1166 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
1167 if (!gid_valid(ctx->gid))
1168 goto bad_val;
1169 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001170
David Howells32021982018-11-01 23:07:26 +00001171 case Opt_mode:
1172 ctx->mode = result.uint_32 & 01777U;
1173 return 0;
Randy Dunlape73a75f2007-07-15 23:40:52 -07001174
David Howells32021982018-11-01 23:07:26 +00001175 case Opt_size:
1176 /* memparse() will accept a K/M/G without a digit */
1177 if (!isdigit(param->string[0]))
1178 goto bad_val;
1179 ctx->max_size_opt = memparse(param->string, &rest);
1180 ctx->max_val_type = SIZE_STD;
1181 if (*rest == '%')
1182 ctx->max_val_type = SIZE_PERCENT;
1183 return 0;
Randy Dunlape73a75f2007-07-15 23:40:52 -07001184
David Howells32021982018-11-01 23:07:26 +00001185 case Opt_nr_inodes:
1186 /* memparse() will accept a K/M/G without a digit */
1187 if (!isdigit(param->string[0]))
1188 goto bad_val;
1189 ctx->nr_inodes = memparse(param->string, &rest);
1190 return 0;
Randy Dunlape73a75f2007-07-15 23:40:52 -07001191
David Howells32021982018-11-01 23:07:26 +00001192 case Opt_pagesize:
1193 ps = memparse(param->string, &rest);
1194 ctx->hstate = size_to_hstate(ps);
1195 if (!ctx->hstate) {
1196 pr_err("Unsupported page size %lu MB\n", ps >> 20);
Lee Schermerhornb4c07bc2007-07-15 23:40:54 -07001197 return -EINVAL;
Randy Dunlape73a75f2007-07-15 23:40:52 -07001198 }
David Howells32021982018-11-01 23:07:26 +00001199 return 0;
1200
1201 case Opt_min_size:
1202 /* memparse() will accept a K/M/G without a digit */
1203 if (!isdigit(param->string[0]))
1204 goto bad_val;
1205 ctx->min_size_opt = memparse(param->string, &rest);
1206 ctx->min_val_type = SIZE_STD;
1207 if (*rest == '%')
1208 ctx->min_val_type = SIZE_PERCENT;
1209 return 0;
1210
1211 default:
1212 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213 }
Andi Kleena137e1c2008-07-23 21:27:43 -07001214
David Howells32021982018-11-01 23:07:26 +00001215bad_val:
1216 return invalf(fc, "hugetlbfs: Bad value '%s' for mount option '%s'\n",
1217 param->string, param->key);
1218}
1219
1220/*
1221 * Validate the parsed options.
1222 */
1223static int hugetlbfs_validate(struct fs_context *fc)
1224{
1225 struct hugetlbfs_fs_context *ctx = fc->fs_private;
1226
Mike Kravetz7ca02d0a2015-04-15 16:13:42 -07001227 /*
1228 * Use huge page pool size (in hstate) to convert the size
1229 * options to number of huge pages. If NO_SIZE, -1 is returned.
1230 */
David Howells32021982018-11-01 23:07:26 +00001231 ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
1232 ctx->max_size_opt,
1233 ctx->max_val_type);
1234 ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
1235 ctx->min_size_opt,
1236 ctx->min_val_type);
Mike Kravetz7ca02d0a2015-04-15 16:13:42 -07001237
	/*
	 * If max_size was specified, then min_size must not be larger.
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size cannot be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate = ctx->hstate;
	sbinfo->max_inodes = ctx->nr_inodes;
	sbinfo->free_inodes = ctx->nr_inodes;
	sbinfo->spool = NULL;
	sbinfo->uid = ctx->uid;
	sbinfo->gid = ctx->gid;
	sbinfo->mode = ctx->mode;

	/*
	 * Allocate and initialize the subpool if a maximum or minimum size
	 * is specified.  Any needed reservations (for minimum size) are
	 * taken when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};
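
/*
 * With the fs_context mount API, a hugetlbfs mount proceeds as follows:
 * hugetlbfs_init_fs_context() allocates the context and sets defaults,
 * hugetlbfs_parse_param() is called once per mount option, and
 * hugetlbfs_get_tree() validates the combined options before creating
 * the superblock via hugetlbfs_fill_super().
 */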

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages = -1;	/* No limit on size by default */
	ctx->nr_inodes = -1;	/* No limit on number of inodes by default */
	ctx->uid = current_fsuid();
	ctx->gid = current_fsgid();
	ctx->mode = 0755;
	ctx->hstate = &default_hstate;
	ctx->min_hpages = -1;	/* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops = &hugetlbfs_fs_context_ops;
	return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= &hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

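/*
 * Map a base-2 page-size log (0 selects the default hugepage size) to an
 * index into the global hstates[] array; returns -1 for unsupported sizes.
 */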
static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

/*
 * Note that the caller must align size to the proper hugepage size;
 * otherwise hugetlb_reserve_pages() reserves one fewer hugepage than
 * intended.
 */
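/*
 * For example, with 2 MiB huge pages an unaligned size of 5 MiB is
 * truncated by the "size >> huge_page_shift(...)" below to
 * 5 MiB >> 21 = 2 pages, so only 4 MiB is reserved.
 */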
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}
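
/*
 * hugetlb_file_setup() serves callers outside hugetlbfs itself, e.g. the
 * SysV shared memory code for shmget(..., SHM_HUGETLB) and the mmap()
 * path for MAP_HUGETLB mappings, handing out an unlinked file on one of
 * the internal per-page-size mounts created below.
 */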

static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
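	/*
	 * h->order + PAGE_SHIFT is log2 of the hugepage size in bytes;
	 * subtracting 10 converts to KiB.  E.g. on x86-64 a 2 MiB hstate
	 * has order 9, giving 1U << (9 + 12 - 10) = 2048K.
	 */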
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %uK\n",
		       1U << (h->order + PAGE_SHIFT - 10));
	return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

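	/*
	 * Mount one internal hugetlbfs instance per supported hugepage
	 * size; only a failure to mount the instance for the default size
	 * (the first hstate) aborts initialization.
	 */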
	i = 0;
	for_each_hstate(h) {
		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt) && i == 0) {
			error = PTR_ERR(mnt);
			goto out;
		}
		hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

 out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
	return error;
}
fs_initcall(init_hugetlbfs_fs)