/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>	/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32   ("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};
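
/*
 * Example mount invocation (illustrative values) exercising the options
 * above:
 *
 *   mount -t hugetlbfs -o pagesize=2M,size=1G,min_size=512M,nr_inodes=64,uid=1000,gid=1000,mode=0770 none /mnt/huge
 *
 * "size" and "min_size" accept a byte count (with an optional K/M/G
 * suffix) or a percentage of the huge page pool (e.g. size=50%); the
 * SIZE_STD/SIZE_PERCENT values above record which form was given.
 */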

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))

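/*
 * Worked example (illustrative): with 4K pages and 64-bit longs
 * (PAGE_SHIFT == 12, BITS_PER_LONG == 64) the mask evaluates to
 *
 *   ((1UL << 13) - 1) << 51 == 0xfff8000000000000
 *
 * i.e. the top 13 bits of vm_pgoff.  If any of those bits were set,
 * vm_pgoff << PAGE_SHIFT would not fit in the 63 value bits of a
 * signed loff_t.
 */
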
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (!hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under mmap_write_lock(mm).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use mm->get_unmapped_area value as a hint to use topdown routine.
	 * If architectures have special needs, they should define their own
	 * version of hugetlb_get_unmapped_area.
	 */
	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}
#endif
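
/*
 * Illustrative userspace use of the mmap path above (2MB huge pages,
 * fd open on a hugetlbfs file):
 *
 *   void *p = mmap(NULL, 4 << 20, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                  fd, 0);
 *
 * Length and file offset must both be multiples of the huge page size:
 * unaligned lengths are rejected in the generic
 * hugetlb_get_unmapped_area() above, unaligned offsets in
 * hugetlbfs_file_mmap().
 */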

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to generic_file_buffered_read(), but we can't
 * use that since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}
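
/*
 * Worked example (illustrative): callers pass start/end in PAGE_SIZE
 * units.  With 2MB huge pages and a vma mapping the file from offset 0
 * (vm_pgoff == 0), truncation at file offset 4MB passes start == 1024,
 * so v_offset == 1024 << PAGE_SHIFT == 4MB and unmapping begins at
 * vma->vm_start + 4MB; end == 0 selects the rest of the vma.
 */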

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults can not race with truncation
 *	in this routine.  hugetlb_no_page() holds i_mmap_rwsem and prevents
 *	page faults in the truncated range by checking i_size.  i_size is
 *	modified while holding i_mmap_rwsem.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  The region/reserve maps for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash = 0;

			index = page->index;
			if (!truncate_op) {
				/*
				 * Only need to hold the fault mutex in the
				 * hole punch case.  This prevents races with
				 * page faults.  Races are not possible in the
				 * case of truncation.
				 */
				hash = hugetlb_fault_mutex_hash(mapping, index);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
			}

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
				i_mmap_lock_write(mapping);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(HPageRestoreReserve(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			if (!truncate_op)
				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}
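
/*
 * Illustrative call shapes for the two cases documented above:
 * hugetlbfs_evict_inode() and hugetlb_vmtruncate() pass lend == LLONG_MAX
 * (truncation), while hugetlbfs_punch_hole() passes the rounded
 * [hole_start, hole_end) range and therefore takes the hole punch path.
 */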

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_mmap_lock_write(mapping);
	i_size_write(inode, offset);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_rwsem */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}
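
/*
 * Illustrative effect of the rounding above: with 2MB huge pages,
 * fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 * 1 << 20, 4 << 20) rounds the hole to [2MB, 4MB) and frees only that
 * one huge page; a punch that does not span a full huge page leaves
 * hole_end <= hole_start and is a no-op that still returns 0.
 */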

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/*
		 * fault mutex taken here, protects against fault path
		 * and hole punch.  inode_lock previously taken protects
		 * against truncation.
		 */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/*
		 * Allocate page without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma.  However, there could be shared mappings with
		 * reserves for the file at the inode level.  If we fallocate
		 * pages in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		page = alloc_huge_page(&pseudo_vma, addr, 0);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, page);
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		SetHPageMigratable(page);
		/*
		 * unlock_page because the page was locked by
		 * add_to_page_cache(); put_page to drop the reference
		 * taken by alloc_huge_page()
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}
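
/*
 * Userspace usage sketch (illustrative; path and sizes are examples,
 * assuming a 2MB huge page mount):
 *
 *   int fd = open("/mnt/huge/f", O_CREAT | O_RDWR, 0644);
 *   fallocate(fd, 0, 0, 8 << 20);            // preallocate four 2MB pages
 *   fallocate(fd, FALLOC_FL_PUNCH_HOLE |     // free the middle two,
 *             FALLOC_FL_KEEP_SIZE,           // keeping i_size at 8MB
 *             2 << 20, 4 << 20);
 *
 * As implemented above, preallocation consumes the inode's reservation
 * and extends i_size unless FALLOC_FL_KEEP_SIZE is given.
 */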
| 762 | |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 763 | static int hugetlbfs_setattr(struct user_namespace *mnt_userns, |
| 764 | struct dentry *dentry, struct iattr *attr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 765 | { |
David Howells | 2b0143b | 2015-03-17 22:25:59 +0000 | [diff] [blame] | 766 | struct inode *inode = d_inode(dentry); |
Andi Kleen | a551643 | 2008-07-23 21:27:41 -0700 | [diff] [blame] | 767 | struct hstate *h = hstate_inode(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 768 | int error; |
| 769 | unsigned int ia_valid = attr->ia_valid; |
Marc-André Lureau | ff62a34 | 2018-01-31 16:19:25 -0800 | [diff] [blame] | 770 | struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 771 | |
Christian Brauner | 2f221d6 | 2021-01-21 14:19:26 +0100 | [diff] [blame] | 772 | error = setattr_prepare(&init_user_ns, dentry, attr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 773 | if (error) |
Christoph Hellwig | 1025774 | 2010-06-04 11:30:02 +0200 | [diff] [blame] | 774 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 775 | |
| 776 | if (ia_valid & ATTR_SIZE) { |
Marc-André Lureau | ff62a34 | 2018-01-31 16:19:25 -0800 | [diff] [blame] | 777 | loff_t oldsize = inode->i_size; |
| 778 | loff_t newsize = attr->ia_size; |
| 779 | |
| 780 | if (newsize & ~huge_page_mask(h)) |
Christoph Hellwig | 1025774 | 2010-06-04 11:30:02 +0200 | [diff] [blame] | 781 | return -EINVAL; |
Miaohe Lin | 398c0da | 2021-02-24 12:10:18 -0800 | [diff] [blame] | 782 | /* protected by i_rwsem */ |
Marc-André Lureau | ff62a34 | 2018-01-31 16:19:25 -0800 | [diff] [blame] | 783 | if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || |
| 784 | (newsize > oldsize && (info->seals & F_SEAL_GROW))) |
| 785 | return -EPERM; |
Miaohe Lin | e5d319d | 2021-02-24 12:10:25 -0800 | [diff] [blame] | 786 | hugetlb_vmtruncate(inode, newsize); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 787 | } |
Christoph Hellwig | 1025774 | 2010-06-04 11:30:02 +0200 | [diff] [blame] | 788 | |
Christian Brauner | 2f221d6 | 2021-01-21 14:19:26 +0100 | [diff] [blame] | 789 | setattr_copy(&init_user_ns, inode, attr); |
Christoph Hellwig | 1025774 | 2010-06-04 11:30:02 +0200 | [diff] [blame] | 790 | mark_inode_dirty(inode); |
| 791 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 792 | } |
| 793 | |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 794 | static struct inode *hugetlbfs_get_root(struct super_block *sb, |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 795 | struct hugetlbfs_fs_context *ctx) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 796 | { |
| 797 | struct inode *inode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 798 | |
| 799 | inode = new_inode(sb); |
| 800 | if (inode) { |
Christoph Hellwig | 85fe402 | 2010-10-23 11:19:54 -0400 | [diff] [blame] | 801 | inode->i_ino = get_next_ino(); |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 802 | inode->i_mode = S_IFDIR | ctx->mode; |
| 803 | inode->i_uid = ctx->uid; |
| 804 | inode->i_gid = ctx->gid; |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 805 | inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 806 | inode->i_op = &hugetlbfs_dir_inode_operations; |
| 807 | inode->i_fop = &simple_dir_operations; |
| 808 | /* directory inodes start off with i_nlink == 2 (for "." entry) */ |
| 809 | inc_nlink(inode); |
Aneesh Kumar K.V | 65ed760 | 2012-04-25 16:01:50 -0700 | [diff] [blame] | 810 | lockdep_annotate_inode_mutex_key(inode); |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 811 | } |
| 812 | return inode; |
| 813 | } |
| 814 | |
Michal Hocko | b610ded | 2013-08-13 16:00:55 -0700 | [diff] [blame] | 815 | /* |
Davidlohr Bueso | c8c06ef | 2014-12-12 16:54:24 -0800 | [diff] [blame] | 816 | * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never |
Michal Hocko | b610ded | 2013-08-13 16:00:55 -0700 | [diff] [blame] | 817 | * be taken from reclaim -- unlike regular filesystems. This needs an |
Kirill A. Shutemov | 88f306b | 2016-01-15 16:57:31 -0800 | [diff] [blame] | 818 | * annotation because huge_pmd_share() does an allocation under hugetlb's |
Davidlohr Bueso | c8c06ef | 2014-12-12 16:54:24 -0800 | [diff] [blame] | 819 | * i_mmap_rwsem. |
Michal Hocko | b610ded | 2013-08-13 16:00:55 -0700 | [diff] [blame] | 820 | */ |
Davidlohr Bueso | c8c06ef | 2014-12-12 16:54:24 -0800 | [diff] [blame] | 821 | static struct lock_class_key hugetlbfs_i_mmap_rwsem_key; |
Michal Hocko | b610ded | 2013-08-13 16:00:55 -0700 | [diff] [blame] | 822 | |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 823 | static struct inode *hugetlbfs_get_inode(struct super_block *sb, |
| 824 | struct inode *dir, |
Al Viro | 18df225 | 2011-07-24 23:17:40 -0400 | [diff] [blame] | 825 | umode_t mode, dev_t dev) |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 826 | { |
| 827 | struct inode *inode; |
Mike Kravetz | 58b6e5e | 2019-04-05 18:39:06 -0700 | [diff] [blame] | 828 | struct resv_map *resv_map = NULL; |
Joonsoo Kim | 9119a41 | 2014-04-03 14:47:25 -0700 | [diff] [blame] | 829 | |
Mike Kravetz | 58b6e5e | 2019-04-05 18:39:06 -0700 | [diff] [blame] | 830 | /* |
| 831 | * Reserve maps are only needed for inodes that can have associated |
| 832 | * page allocations. |
| 833 | */ |
| 834 | if (S_ISREG(mode) || S_ISLNK(mode)) { |
| 835 | resv_map = resv_map_alloc(); |
| 836 | if (!resv_map) |
| 837 | return NULL; |
| 838 | } |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 839 | |
| 840 | inode = new_inode(sb); |
| 841 | if (inode) { |
Marc-André Lureau | ff62a34 | 2018-01-31 16:19:25 -0800 | [diff] [blame] | 842 | struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); |
| 843 | |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 844 | inode->i_ino = get_next_ino(); |
Christian Brauner | 21cb47b | 2021-01-21 14:19:25 +0100 | [diff] [blame] | 845 | inode_init_owner(&init_user_ns, inode, dir, mode); |
Davidlohr Bueso | c8c06ef | 2014-12-12 16:54:24 -0800 | [diff] [blame] | 846 | lockdep_set_class(&inode->i_mapping->i_mmap_rwsem, |
| 847 | &hugetlbfs_i_mmap_rwsem_key); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 848 | inode->i_mapping->a_ops = &hugetlbfs_aops; |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 849 | inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); |
Joonsoo Kim | 9119a41 | 2014-04-03 14:47:25 -0700 | [diff] [blame] | 850 | inode->i_mapping->private_data = resv_map; |
Marc-André Lureau | ff62a34 | 2018-01-31 16:19:25 -0800 | [diff] [blame] | 851 | info->seals = F_SEAL_SEAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 852 | switch (mode & S_IFMT) { |
| 853 | default: |
| 854 | init_special_inode(inode, mode, dev); |
| 855 | break; |
| 856 | case S_IFREG: |
| 857 | inode->i_op = &hugetlbfs_inode_operations; |
| 858 | inode->i_fop = &hugetlbfs_file_operations; |
| 859 | break; |
| 860 | case S_IFDIR: |
| 861 | inode->i_op = &hugetlbfs_dir_inode_operations; |
| 862 | inode->i_fop = &simple_dir_operations; |
| 863 | |
| 864 | /* directory inodes start off with i_nlink == 2 (for "." entry) */ |
Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 865 | inc_nlink(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 866 | break; |
| 867 | case S_IFLNK: |
| 868 | inode->i_op = &page_symlink_inode_operations; |
Al Viro | 21fc61c | 2015-11-17 01:07:57 -0500 | [diff] [blame] | 869 | inode_nohighmem(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 870 | break; |
| 871 | } |
Josh Boyer | e096d0c | 2011-08-25 07:48:12 -0400 | [diff] [blame] | 872 | lockdep_annotate_inode_mutex_key(inode); |
Mike Kravetz | 58b6e5e | 2019-04-05 18:39:06 -0700 | [diff] [blame] | 873 | } else { |
| 874 | if (resv_map) |
| 875 | kref_put(&resv_map->refs, resv_map_release); |
| 876 | } |
Joonsoo Kim | 9119a41 | 2014-04-03 14:47:25 -0700 | [diff] [blame] | 877 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 878 | return inode; |
| 879 | } |
| 880 | |
| 881 | /* |
| 882 | * File creation. Allocate an inode, and we're done.. |
| 883 | */ |
Piotr Sarna | 1ab5b82 | 2019-11-30 17:56:43 -0800 | [diff] [blame] | 884 | static int do_hugetlbfs_mknod(struct inode *dir, |
| 885 | struct dentry *dentry, |
| 886 | umode_t mode, |
| 887 | dev_t dev, |
| 888 | bool tmpfile) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 889 | { |
| 890 | struct inode *inode; |
| 891 | int error = -ENOSPC; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 892 | |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 893 | inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 894 | if (inode) { |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 895 | dir->i_ctime = dir->i_mtime = current_time(dir); |
Piotr Sarna | 1ab5b82 | 2019-11-30 17:56:43 -0800 | [diff] [blame] | 896 | if (tmpfile) { |
| 897 | d_tmpfile(dentry, inode); |
| 898 | } else { |
| 899 | d_instantiate(dentry, inode); |
| 900 | dget(dentry); /* Extra count - pin the dentry in core */ |
| 901 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 902 | error = 0; |
| 903 | } |
| 904 | return error; |
| 905 | } |
| 906 | |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 907 | static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir, |
| 908 | struct dentry *dentry, umode_t mode, dev_t dev) |
Piotr Sarna | 1ab5b82 | 2019-11-30 17:56:43 -0800 | [diff] [blame] | 909 | { |
| 910 | return do_hugetlbfs_mknod(dir, dentry, mode, dev, false); |
| 911 | } |
| 912 | |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 913 | static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir, |
| 914 | struct dentry *dentry, umode_t mode) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 915 | { |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 916 | int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry, |
| 917 | mode | S_IFDIR, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 918 | if (!retval) |
Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 919 | inc_nlink(dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 920 | return retval; |
| 921 | } |
| 922 | |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 923 | static int hugetlbfs_create(struct user_namespace *mnt_userns, |
| 924 | struct inode *dir, struct dentry *dentry, |
| 925 | umode_t mode, bool excl) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 926 | { |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 927 | return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 928 | } |
| 929 | |
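/*
 * A note on the tmpfile path below (generic O_TMPFILE semantics, not
 * hugetlbfs-specific): do_hugetlbfs_mknod() is called with
 * tmpfile == true, so the inode is attached with d_tmpfile() rather
 * than d_instantiate() and never gains a visible directory entry; the
 * file is released on last close unless the caller links it in
 * afterwards (e.g. linkat() with AT_EMPTY_PATH).
 */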
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 930 | static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns, |
| 931 | struct inode *dir, struct dentry *dentry, |
| 932 | umode_t mode) |
Piotr Sarna | 1ab5b82 | 2019-11-30 17:56:43 -0800 | [diff] [blame] | 933 | { |
| 934 | return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true); |
| 935 | } |
| 936 | |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 937 | static int hugetlbfs_symlink(struct user_namespace *mnt_userns, |
| 938 | struct inode *dir, struct dentry *dentry, |
| 939 | const char *symname) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 940 | { |
| 941 | struct inode *inode; |
| 942 | int error = -ENOSPC; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 943 | |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 944 | inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 945 | if (inode) { |
| 946 | int l = strlen(symname)+1; |
| 947 | error = page_symlink(inode, symname, l); |
| 948 | if (!error) { |
| 949 | d_instantiate(dentry, inode); |
| 950 | dget(dentry); |
| 951 | } else |
| 952 | iput(inode); |
| 953 | } |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 954 | dir->i_ctime = dir->i_mtime = current_time(dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 955 | |
| 956 | return error; |
| 957 | } |
| 958 | |
Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 959 | static int hugetlbfs_migrate_page(struct address_space *mapping, |
Mel Gorman | b969c4ab | 2012-01-12 17:19:34 -0800 | [diff] [blame] | 960 | struct page *newpage, struct page *page, |
Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 961 | enum migrate_mode mode) |
Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 962 | { |
| 963 | int rc; |
| 964 | |
| 965 | rc = migrate_huge_page_move_mapping(mapping, newpage, page); |
Rafael Aquini | 78bd520 | 2012-12-11 16:02:31 -0800 | [diff] [blame] | 966 | if (rc != MIGRATEPAGE_SUCCESS) |
Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 967 | return rc; |
Mike Kravetz | cb6acd0 | 2019-02-28 16:22:02 -0800 | [diff] [blame] | 968 | |
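	/*
	 * Transfer the subpool pointer recorded in the old page to the
	 * new page, so pool accounting follows the page that now backs
	 * the file.
	 */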
Mike Kravetz | d6995da | 2021-02-24 12:08:51 -0800 | [diff] [blame] | 969 | if (hugetlb_page_subpool(page)) { |
| 970 | hugetlb_set_page_subpool(newpage, hugetlb_page_subpool(page)); |
| 971 | hugetlb_set_page_subpool(page, NULL); |
Mike Kravetz | cb6acd0 | 2019-02-28 16:22:02 -0800 | [diff] [blame] | 972 | } |
| 973 | |
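	/*
	 * With MIGRATE_SYNC_NO_COPY the caller copies the data itself
	 * (e.g. by device DMA), so only the page state is migrated here.
	 */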
Jérôme Glisse | 2916ecc | 2017-09-08 16:12:06 -0700 | [diff] [blame] | 974 | if (mode != MIGRATE_SYNC_NO_COPY) |
| 975 | migrate_page_copy(newpage, page); |
| 976 | else |
| 977 | migrate_page_states(newpage, page); |
Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 978 | |
Rafael Aquini | 78bd520 | 2012-12-11 16:02:31 -0800 | [diff] [blame] | 979 | return MIGRATEPAGE_SUCCESS; |
Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 980 | } |
| 981 | |
Naoya Horiguchi | 78bb920 | 2017-07-10 15:47:50 -0700 | [diff] [blame] | 982 | static int hugetlbfs_error_remove_page(struct address_space *mapping, |
| 983 | struct page *page) |
| 984 | { |
| 985 | struct inode *inode = mapping->host; |
Mike Kravetz | ab615a5 | 2017-11-02 15:59:41 -0700 | [diff] [blame] | 986 | pgoff_t index = page->index; |
Naoya Horiguchi | 78bb920 | 2017-07-10 15:47:50 -0700 | [diff] [blame] | 987 | |
| 988 | remove_huge_page(page); |
Mike Kravetz | ab615a5 | 2017-11-02 15:59:41 -0700 | [diff] [blame] | 989 | if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1))) |
| 990 | hugetlb_fix_reserve_counts(inode); |
| 991 | |
Naoya Horiguchi | 78bb920 | 2017-07-10 15:47:50 -0700 | [diff] [blame] | 992 | return 0; |
| 993 | } |
| 994 | |
David Howells | 4a25220 | 2017-07-05 16:24:18 +0100 | [diff] [blame] | 995 | /* |
| 996 | * Display the mount options in /proc/mounts. |
| 997 | */ |
| 998 | static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root) |
| 999 | { |
| 1000 | struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb); |
| 1001 | struct hugepage_subpool *spool = sbinfo->spool; |
| 1002 | unsigned long hpage_size = huge_page_size(sbinfo->hstate); |
| 1003 | unsigned int hpage_shift = huge_page_shift(sbinfo->hstate); |
| 1004 | char mod; |
| 1005 | |
| 1006 | if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) |
| 1007 | seq_printf(m, ",uid=%u", |
| 1008 | from_kuid_munged(&init_user_ns, sbinfo->uid)); |
| 1009 | if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) |
| 1010 | seq_printf(m, ",gid=%u", |
| 1011 | from_kgid_munged(&init_user_ns, sbinfo->gid)); |
| 1012 | if (sbinfo->mode != 0755) |
| 1013 | seq_printf(m, ",mode=%o", sbinfo->mode); |
| 1014 | if (sbinfo->max_inodes != -1) |
| 1015 | seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes); |
| 1016 | |
| 1017 | hpage_size /= 1024; |
| 1018 | mod = 'K'; |
| 1019 | if (hpage_size >= 1024) { |
| 1020 | hpage_size /= 1024; |
| 1021 | mod = 'M'; |
| 1022 | } |
| 1023 | seq_printf(m, ",pagesize=%lu%c", hpage_size, mod); |
| 1024 | if (spool) { |
| 1025 | if (spool->max_hpages != -1) |
| 1026 | seq_printf(m, ",size=%llu", |
| 1027 | (unsigned long long)spool->max_hpages << hpage_shift); |
| 1028 | if (spool->min_hpages != -1) |
| 1029 | seq_printf(m, ",min_size=%llu", |
| 1030 | (unsigned long long)spool->min_hpages << hpage_shift); |
| 1031 | } |
| 1032 | return 0; |
| 1033 | } |
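/*
 * For illustration (mount point and sizes hypothetical): an instance
 * mounted with "size=2G,pagesize=2M" and otherwise default options
 * shows up in /proc/mounts roughly as
 *
 *	none /mnt/huge hugetlbfs rw,relatime,pagesize=2M,size=2147483648 0 0
 *
 * uid= and gid= are only emitted for a non-root owner, mode= only when
 * it is not 0755, and nr_inodes= only when a limit was set.
 */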
| 1034 | |
David Howells | 726c334 | 2006-06-23 02:02:58 -0700 | [diff] [blame] | 1035 | static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1036 | { |
David Howells | 726c334 | 2006-06-23 02:02:58 -0700 | [diff] [blame] | 1037 | struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb); |
David Howells | 2b0143b | 2015-03-17 22:25:59 +0000 | [diff] [blame] | 1038 | struct hstate *h = hstate_inode(d_inode(dentry)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1039 | |
| 1040 | buf->f_type = HUGETLBFS_MAGIC; |
Andi Kleen | a551643 | 2008-07-23 21:27:41 -0700 | [diff] [blame] | 1041 | buf->f_bsize = huge_page_size(h); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1042 | if (sbinfo) { |
| 1043 | spin_lock(&sbinfo->stat_lock); |
David Gibson | 74a8a65 | 2005-11-21 21:32:24 -0800 | [diff] [blame] | 1044 | /* If no limits set, just report 0 for max/free/used |
| 1045 | * blocks, like simple_statfs() */ |
David Gibson | 9048162 | 2012-03-21 16:34:12 -0700 | [diff] [blame] | 1046 | if (sbinfo->spool) { |
| 1047 | long free_pages; |
| 1048 | |
| 1049 | spin_lock(&sbinfo->spool->lock); |
| 1050 | buf->f_blocks = sbinfo->spool->max_hpages; |
| 1051 | free_pages = sbinfo->spool->max_hpages |
| 1052 | - sbinfo->spool->used_hpages; |
| 1053 | buf->f_bavail = buf->f_bfree = free_pages; |
| 1054 | spin_unlock(&sbinfo->spool->lock); |
David Gibson | 74a8a65 | 2005-11-21 21:32:24 -0800 | [diff] [blame] | 1055 | buf->f_files = sbinfo->max_inodes; |
| 1056 | buf->f_ffree = sbinfo->free_inodes; |
| 1057 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1058 | spin_unlock(&sbinfo->stat_lock); |
| 1059 | } |
| 1060 | buf->f_namelen = NAME_MAX; |
| 1061 | return 0; |
| 1062 | } |
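/*
 * Because f_bsize is the huge page size, the block counts reported
 * above are in units of huge pages: a mount with "pagesize=2M,size=1G"
 * (values hypothetical) reports f_bsize = 2097152 and f_blocks = 512,
 * which df(1) renders as a 1 GiB filesystem.
 */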
| 1063 | |
| 1064 | static void hugetlbfs_put_super(struct super_block *sb) |
| 1065 | { |
| 1066 | struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb); |
| 1067 | |
| 1068 | if (sbi) { |
| 1069 | sb->s_fs_info = NULL; |
David Gibson | 9048162 | 2012-03-21 16:34:12 -0700 | [diff] [blame] | 1070 | |
| 1071 | if (sbi->spool) |
| 1072 | hugepage_put_subpool(sbi->spool); |
| 1073 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1074 | kfree(sbi); |
| 1075 | } |
| 1076 | } |
| 1077 | |
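/*
 * sbinfo->free_inodes is seeded from the nr_inodes mount option and
 * counts down as inodes are allocated.  A negative value (the default,
 * -1) means "no limit", which is why the two helpers below only touch
 * the counter when it is >= 0.
 */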
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1078 | static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo) |
| 1079 | { |
| 1080 | if (sbinfo->free_inodes >= 0) { |
| 1081 | spin_lock(&sbinfo->stat_lock); |
| 1082 | if (unlikely(!sbinfo->free_inodes)) { |
| 1083 | spin_unlock(&sbinfo->stat_lock); |
| 1084 | return 0; |
| 1085 | } |
| 1086 | sbinfo->free_inodes--; |
| 1087 | spin_unlock(&sbinfo->stat_lock); |
| 1088 | } |
| 1089 | |
| 1090 | return 1; |
| 1091 | } |
| 1092 | |
| 1093 | static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo) |
| 1094 | { |
| 1095 | if (sbinfo->free_inodes >= 0) { |
| 1096 | spin_lock(&sbinfo->stat_lock); |
| 1097 | sbinfo->free_inodes++; |
| 1098 | spin_unlock(&sbinfo->stat_lock); |
| 1099 | } |
| 1100 | } |
| 1101 | |
| 1102 | |
Christoph Lameter | e18b890 | 2006-12-06 20:33:20 -0800 | [diff] [blame] | 1103 | static struct kmem_cache *hugetlbfs_inode_cachep; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1104 | |
| 1105 | static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) |
| 1106 | { |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1107 | struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1108 | struct hugetlbfs_inode_info *p; |
| 1109 | |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1110 | if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1111 | return NULL; |
Christoph Lameter | e94b176 | 2006-12-06 20:33:17 -0800 | [diff] [blame] | 1112 | p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL); |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1113 | if (unlikely(!p)) { |
| 1114 | hugetlbfs_inc_free_inodes(sbinfo); |
| 1115 | return NULL; |
| 1116 | } |
Mike Kravetz | 4742a35 | 2017-03-31 15:12:01 -0700 | [diff] [blame] | 1117 | |
| 1118 | /* |
| 1119 | * Any time after allocation, hugetlbfs_destroy_inode can be called |
| 1120 | * for the inode. mpol_free_shared_policy is unconditionally called |
| 1121 | * as part of hugetlbfs_destroy_inode. So, initialize policy here |
| 1122 | * in case of a quick call to destroy. |
| 1123 | * |
| 1124 | * Note that the policy is initialized even if we are creating a |
| 1125 | * private inode. This simplifies hugetlbfs_destroy_inode. |
| 1126 | */ |
| 1127 | mpol_shared_policy_init(&p->policy, NULL); |
| 1128 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1129 | return &p->vfs_inode; |
| 1130 | } |
| 1131 | |
Al Viro | b62de32 | 2019-04-15 23:16:38 -0400 | [diff] [blame] | 1132 | static void hugetlbfs_free_inode(struct inode *inode) |
Nick Piggin | fa0d7e3d | 2011-01-07 17:49:49 +1100 | [diff] [blame] | 1133 | { |
Nick Piggin | fa0d7e3d | 2011-01-07 17:49:49 +1100 | [diff] [blame] | 1134 | kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode)); |
| 1135 | } |
| 1136 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1137 | static void hugetlbfs_destroy_inode(struct inode *inode) |
| 1138 | { |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1139 | hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1140 | mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1141 | } |
| 1142 | |
Christoph Hellwig | f5e54d6 | 2006-06-28 04:26:44 -0700 | [diff] [blame] | 1143 | static const struct address_space_operations hugetlbfs_aops = { |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 1144 | .write_begin = hugetlbfs_write_begin, |
| 1145 | .write_end = hugetlbfs_write_end, |
Mike Kravetz | a4fa34cd | 2021-02-24 12:09:58 -0800 | [diff] [blame] | 1146 | .set_page_dirty = __set_page_dirty_no_writeback, |
Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1147 | .migratepage = hugetlbfs_migrate_page, |
Naoya Horiguchi | 78bb920 | 2017-07-10 15:47:50 -0700 | [diff] [blame] | 1148 | .error_remove_page = hugetlbfs_error_remove_page, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 | }; |
| 1150 | |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1151 | |
Alexey Dobriyan | 51cc506 | 2008-07-25 19:45:34 -0700 | [diff] [blame] | 1152 | static void init_once(void *foo) |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1153 | { |
| 1154 | struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; |
| 1155 | |
Christoph Lameter | a35afb8 | 2007-05-16 22:10:57 -0700 | [diff] [blame] | 1156 | inode_init_once(&ei->vfs_inode); |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1157 | } |
| 1158 | |
Arjan van de Ven | 4b6f5d2 | 2006-03-28 01:56:42 -0800 | [diff] [blame] | 1159 | const struct file_operations hugetlbfs_file_operations = { |
Al Viro | 34d0640 | 2015-04-03 11:31:35 -0400 | [diff] [blame] | 1160 | .read_iter = hugetlbfs_read_iter, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1161 | .mmap = hugetlbfs_file_mmap, |
Christoph Hellwig | 1b061d9 | 2010-05-26 17:53:41 +0200 | [diff] [blame] | 1162 | .fsync = noop_fsync, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1163 | .get_unmapped_area = hugetlb_get_unmapped_area, |
Mike Kravetz | 70c3547 | 2015-09-08 15:01:54 -0700 | [diff] [blame] | 1164 | .llseek = default_llseek, |
| 1165 | .fallocate = hugetlbfs_fallocate, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1166 | }; |
| 1167 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 1168 | static const struct inode_operations hugetlbfs_dir_inode_operations = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1169 | .create = hugetlbfs_create, |
| 1170 | .lookup = simple_lookup, |
| 1171 | .link = simple_link, |
| 1172 | .unlink = simple_unlink, |
| 1173 | .symlink = hugetlbfs_symlink, |
| 1174 | .mkdir = hugetlbfs_mkdir, |
| 1175 | .rmdir = simple_rmdir, |
| 1176 | .mknod = hugetlbfs_mknod, |
| 1177 | .rename = simple_rename, |
| 1178 | .setattr = hugetlbfs_setattr, |
Piotr Sarna | 1ab5b82 | 2019-11-30 17:56:43 -0800 | [diff] [blame] | 1179 | .tmpfile = hugetlbfs_tmpfile, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1180 | }; |
| 1181 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 1182 | static const struct inode_operations hugetlbfs_inode_operations = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1183 | .setattr = hugetlbfs_setattr, |
| 1184 | }; |
| 1185 | |
Josef 'Jeff' Sipek | ee9b6d6 | 2007-02-12 00:55:41 -0800 | [diff] [blame] | 1186 | static const struct super_operations hugetlbfs_ops = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1187 | .alloc_inode = hugetlbfs_alloc_inode, |
Al Viro | b62de32 | 2019-04-15 23:16:38 -0400 | [diff] [blame] | 1188 | .free_inode = hugetlbfs_free_inode, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1189 | .destroy_inode = hugetlbfs_destroy_inode, |
Al Viro | 2bbbda3 | 2010-06-04 19:52:12 -0400 | [diff] [blame] | 1190 | .evict_inode = hugetlbfs_evict_inode, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1191 | .statfs = hugetlbfs_statfs, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1192 | .put_super = hugetlbfs_put_super, |
David Howells | 4a25220 | 2017-07-05 16:24:18 +0100 | [diff] [blame] | 1193 | .show_options = hugetlbfs_show_options, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1194 | }; |
| 1195 | |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1196 | /* |
| 1197 | * Convert size option passed from command line to number of huge pages |
| 1198 | * in the pool specified by hstate. Size option could be in bytes |
| 1199 | * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT). |
| 1200 | */ |
David Howells | 4a25220 | 2017-07-05 16:24:18 +0100 | [diff] [blame] | 1201 | static long |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1202 | hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt, |
David Howells | 4a25220 | 2017-07-05 16:24:18 +0100 | [diff] [blame] | 1203 | enum hugetlbfs_size_type val_type) |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1204 | { |
| 1205 | if (val_type == NO_SIZE) |
| 1206 | return -1; |
| 1207 | |
| 1208 | if (val_type == SIZE_PERCENT) { |
| 1209 | size_opt <<= huge_page_shift(h); |
| 1210 | size_opt *= h->max_huge_pages; |
| 1211 | do_div(size_opt, 100); |
| 1212 | } |
| 1213 | |
| 1214 | size_opt >>= huge_page_shift(h); |
| 1215 | return size_opt; |
| 1216 | } |
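/*
 * Worked example for the conversion above (numbers hypothetical):
 * assume 2M huge pages (huge_page_shift == 21) and a pool of
 * max_huge_pages == 512.  Then "size=50%" reaches this function as
 * size_opt == 50 with SIZE_PERCENT:
 *
 *	50 << 21               = 104857600
 *	104857600 * 512 / 100  = 536870912
 *	536870912 >> 21        = 256 huge pages, i.e. 50% of the pool
 *
 * A byte-valued option such as "size=1G" (SIZE_STD) is simply shifted:
 * 1073741824 >> 21 = 512 huge pages.
 */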
| 1217 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1218 | /* |
| 1219 | * Parse one mount parameter. |
| 1220 | */ |
| 1221 | static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1222 | { |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1223 | struct hugetlbfs_fs_context *ctx = fc->fs_private; |
| 1224 | struct fs_parse_result result; |
| 1225 | char *rest; |
| 1226 | unsigned long ps; |
| 1227 | int opt; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1228 | |
Al Viro | d7167b1 | 2019-09-07 07:23:15 -0400 | [diff] [blame] | 1229 | opt = fs_parse(fc, hugetlb_fs_parameters, param, &result); |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1230 | if (opt < 0) |
| 1231 | return opt; |
| 1232 | |
| 1233 | switch (opt) { |
| 1234 | case Opt_uid: |
| 1235 | ctx->uid = make_kuid(current_user_ns(), result.uint_32); |
| 1236 | if (!uid_valid(ctx->uid)) |
| 1237 | goto bad_val; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1238 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1239 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1240 | case Opt_gid: |
| 1241 | ctx->gid = make_kgid(current_user_ns(), result.uint_32); |
| 1242 | if (!gid_valid(ctx->gid)) |
| 1243 | goto bad_val; |
| 1244 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1245 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1246 | case Opt_mode: |
| 1247 | ctx->mode = result.uint_32 & 01777U; |
| 1248 | return 0; |
Randy Dunlap | e73a75f | 2007-07-15 23:40:52 -0700 | [diff] [blame] | 1249 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1250 | case Opt_size: |
| 1251 | /* memparse() will accept a K/M/G without a digit */ |
| 1252 | if (!isdigit(param->string[0])) |
| 1253 | goto bad_val; |
| 1254 | ctx->max_size_opt = memparse(param->string, &rest); |
| 1255 | ctx->max_val_type = SIZE_STD; |
| 1256 | if (*rest == '%') |
| 1257 | ctx->max_val_type = SIZE_PERCENT; |
| 1258 | return 0; |
Randy Dunlap | e73a75f | 2007-07-15 23:40:52 -0700 | [diff] [blame] | 1259 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1260 | case Opt_nr_inodes: |
| 1261 | /* memparse() will accept a K/M/G without a digit */ |
| 1262 | if (!isdigit(param->string[0])) |
| 1263 | goto bad_val; |
| 1264 | ctx->nr_inodes = memparse(param->string, &rest); |
| 1265 | return 0; |
Randy Dunlap | e73a75f | 2007-07-15 23:40:52 -0700 | [diff] [blame] | 1266 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1267 | case Opt_pagesize: |
| 1268 | ps = memparse(param->string, &rest); |
| 1269 | ctx->hstate = size_to_hstate(ps); |
| 1270 | if (!ctx->hstate) { |
| 1271 | pr_err("Unsupported page size %lu MB\n", ps >> 20); |
Lee Schermerhorn | b4c07bc | 2007-07-15 23:40:54 -0700 | [diff] [blame] | 1272 | return -EINVAL; |
Randy Dunlap | e73a75f | 2007-07-15 23:40:52 -0700 | [diff] [blame] | 1273 | } |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1274 | return 0; |
| 1275 | |
| 1276 | case Opt_min_size: |
| 1277 | /* memparse() will accept a K/M/G without a digit */ |
| 1278 | if (!isdigit(param->string[0])) |
| 1279 | goto bad_val; |
| 1280 | ctx->min_size_opt = memparse(param->string, &rest); |
| 1281 | ctx->min_val_type = SIZE_STD; |
| 1282 | if (*rest == '%') |
| 1283 | ctx->min_val_type = SIZE_PERCENT; |
| 1284 | return 0; |
| 1285 | |
| 1286 | default: |
| 1287 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1288 | } |
Andi Kleen | a137e1c | 2008-07-23 21:27:43 -0700 | [diff] [blame] | 1289 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1290 | bad_val: |
Al Viro | b5db30c | 2019-12-21 21:34:06 -0500 | [diff] [blame] | 1291 | return invalfc(fc, "Bad value '%s' for mount option '%s'\n", |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1292 | param->string, param->key); |
| 1293 | } |
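/*
 * The cases above map one-to-one onto mount options.  A sketch of an
 * invocation exercising most of them (mount point and ids hypothetical):
 *
 *	mount -t hugetlbfs \
 *	      -o uid=1000,gid=1000,mode=0770,pagesize=2M,size=1G,min_size=512M,nr_inodes=64 \
 *	      none /mnt/huge
 *
 * As parsed above, size= and min_size= also accept a percentage of the
 * huge page pool, e.g. "size=50%".
 */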
| 1294 | |
| 1295 | /* |
| 1296 | * Validate the parsed options. |
| 1297 | */ |
| 1298 | static int hugetlbfs_validate(struct fs_context *fc) |
| 1299 | { |
| 1300 | struct hugetlbfs_fs_context *ctx = fc->fs_private; |
| 1301 | |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1302 | /* |
| 1303 | * Use huge page pool size (in hstate) to convert the size |
| 1304 | * options to number of huge pages. If NO_SIZE, -1 is returned. |
| 1305 | */ |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1306 | ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate, |
| 1307 | ctx->max_size_opt, |
| 1308 | ctx->max_val_type); |
| 1309 | ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate, |
| 1310 | ctx->min_size_opt, |
| 1311 | ctx->min_val_type); |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1312 | |
| 1313 | /* |
| 1314 | * If max_size was specified, then min_size must be smaller |
| 1315 | */ |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1316 | if (ctx->max_val_type > NO_SIZE && |
| 1317 | ctx->min_hpages > ctx->max_hpages) { |
| 1318 | pr_err("Minimum size cannot be greater than maximum size\n"); |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1319 | return -EINVAL; |
Andi Kleen | a137e1c | 2008-07-23 21:27:43 -0700 | [diff] [blame] | 1320 | } |
| 1321 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1322 | return 0; |
| 1323 | } |
| 1324 | |
| 1325 | static int |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1326 | hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1327 | { |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1328 | struct hugetlbfs_fs_context *ctx = fc->fs_private; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1329 | struct hugetlbfs_sb_info *sbinfo; |
| 1330 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1331 | sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL); |
| 1332 | if (!sbinfo) |
| 1333 | return -ENOMEM; |
| 1334 | sb->s_fs_info = sbinfo; |
| 1335 | spin_lock_init(&sbinfo->stat_lock); |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1336 | sbinfo->hstate = ctx->hstate; |
| 1337 | sbinfo->max_inodes = ctx->nr_inodes; |
| 1338 | sbinfo->free_inodes = ctx->nr_inodes; |
| 1339 | sbinfo->spool = NULL; |
| 1340 | sbinfo->uid = ctx->uid; |
| 1341 | sbinfo->gid = ctx->gid; |
| 1342 | sbinfo->mode = ctx->mode; |
David Howells | 4a25220 | 2017-07-05 16:24:18 +0100 | [diff] [blame] | 1343 | |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1344 | /* |
| 1345 | * Allocate and initialize subpool if maximum or minimum size is |
Miaohe Lin | 1935ebd | 2021-02-24 12:10:21 -0800 | [diff] [blame] | 1346 | * specified. Any needed reservations (for minimum size) are taken |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1347 | * when the subpool is created. |
| 1348 | */ |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1349 | if (ctx->max_hpages != -1 || ctx->min_hpages != -1) { |
| 1350 | sbinfo->spool = hugepage_new_subpool(ctx->hstate, |
| 1351 | ctx->max_hpages, |
| 1352 | ctx->min_hpages); |
David Gibson | 9048162 | 2012-03-21 16:34:12 -0700 | [diff] [blame] | 1353 | if (!sbinfo->spool) |
| 1354 | goto out_free; |
| 1355 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1356 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1357 | sb->s_blocksize = huge_page_size(ctx->hstate); |
| 1358 | sb->s_blocksize_bits = huge_page_shift(ctx->hstate); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 | sb->s_magic = HUGETLBFS_MAGIC; |
| 1360 | sb->s_op = &hugetlbfs_ops; |
| 1361 | sb->s_time_gran = 1; |
Mike Kravetz | 1556829 | 2020-08-11 18:31:35 -0700 | [diff] [blame] | 1362 | |
| 1363 | /* |
| 1364 | * Due to the special and limited functionality of hugetlbfs, it does |
| 1365 | * not work well as a stacking filesystem. |
| 1366 | */ |
| 1367 | sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1368 | sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx)); |
Al Viro | 48fde70 | 2012-01-08 22:15:13 -0500 | [diff] [blame] | 1369 | if (!sb->s_root) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1370 | goto out_free; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1371 | return 0; |
| 1372 | out_free: |
Fabian Frederick | 6e6870d | 2014-06-04 16:10:40 -0700 | [diff] [blame] | 1373 | kfree(sbinfo->spool); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1374 | kfree(sbinfo); |
| 1375 | return -ENOMEM; |
| 1376 | } |
| 1377 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1378 | static int hugetlbfs_get_tree(struct fs_context *fc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1379 | { |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1380 | int err = hugetlbfs_validate(fc); |
| 1381 | if (err) |
| 1382 | return err; |
Al Viro | 2ac295d | 2019-06-01 20:48:55 -0400 | [diff] [blame] | 1383 | return get_tree_nodev(fc, hugetlbfs_fill_super); |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1384 | } |
| 1385 | |
| 1386 | static void hugetlbfs_fs_context_free(struct fs_context *fc) |
| 1387 | { |
| 1388 | kfree(fc->fs_private); |
| 1389 | } |
| 1390 | |
| 1391 | static const struct fs_context_operations hugetlbfs_fs_context_ops = { |
| 1392 | .free = hugetlbfs_fs_context_free, |
| 1393 | .parse_param = hugetlbfs_parse_param, |
| 1394 | .get_tree = hugetlbfs_get_tree, |
| 1395 | }; |
| 1396 | |
| 1397 | static int hugetlbfs_init_fs_context(struct fs_context *fc) |
| 1398 | { |
| 1399 | struct hugetlbfs_fs_context *ctx; |
| 1400 | |
| 1401 | ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL); |
| 1402 | if (!ctx) |
| 1403 | return -ENOMEM; |
| 1404 | |
| 1405 | ctx->max_hpages = -1; /* No limit on size by default */ |
| 1406 | ctx->nr_inodes = -1; /* No limit on number of inodes by default */ |
| 1407 | ctx->uid = current_fsuid(); |
| 1408 | ctx->gid = current_fsgid(); |
| 1409 | ctx->mode = 0755; |
| 1410 | ctx->hstate = &default_hstate; |
| 1411 | ctx->min_hpages = -1; /* No default minimum size */ |
| 1412 | ctx->max_val_type = NO_SIZE; |
| 1413 | ctx->min_val_type = NO_SIZE; |
| 1414 | fc->fs_private = ctx; |
| 1415 | fc->ops = &hugetlbfs_fs_context_ops; |
| 1416 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1417 | } |
| 1418 | |
| 1419 | static struct file_system_type hugetlbfs_fs_type = { |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1420 | .name = "hugetlbfs", |
| 1421 | .init_fs_context = hugetlbfs_init_fs_context, |
Al Viro | d7167b1 | 2019-09-07 07:23:15 -0400 | [diff] [blame] | 1422 | .parameters = hugetlb_fs_parameters, |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1423 | .kill_sb = kill_litter_super, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1424 | }; |
| 1425 | |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1426 | static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1427 | |
Mel Gorman | ef1ff6b | 2009-09-23 15:56:05 -0700 | [diff] [blame] | 1428 | static int can_do_hugetlb_shm(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1429 | { |
Eric W. Biederman | a0eb3a0 | 2012-02-07 16:19:25 -0800 | [diff] [blame] | 1430 | kgid_t shm_group; |
| 1431 | shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group); |
| 1432 | return capable(CAP_IPC_LOCK) || in_group_p(shm_group); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1433 | } |
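/*
 * sysctl_hugetlb_shm_group is exposed as vm.hugetlb_shm_group
 * (/proc/sys/vm/hugetlb_shm_group); an administrator can grant an
 * unprivileged group access to SHM_HUGETLB with, for example
 * (gid hypothetical):
 *
 *	sysctl -w vm.hugetlb_shm_group=1001
 */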
| 1434 | |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1435 | static int get_hstate_idx(int page_size_log) |
| 1436 | { |
Naoya Horiguchi | af73e4d | 2013-05-07 16:18:13 -0700 | [diff] [blame] | 1437 | struct hstate *h = hstate_sizelog(page_size_log); |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1438 | |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1439 | if (!h) |
| 1440 | return -1; |
Miaohe Lin | 04adbc3 | 2021-05-04 18:33:22 -0700 | [diff] [blame] | 1441 | return hstate_index(h); |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1442 | } |
| 1443 | |
Naoya Horiguchi | af73e4d | 2013-05-07 16:18:13 -0700 | [diff] [blame] | 1444 | /* |
| 1445 | * Note that size should be aligned to the proper hugepage size by the caller; |
| 1446 | * otherwise hugetlb_reserve_pages() reserves one less huge page than intended. |
| 1447 | */ |
| 1448 | struct file *hugetlb_file_setup(const char *name, size_t size, |
Alexey Gladkov | d7c9e99 | 2021-04-22 14:27:14 +0200 | [diff] [blame] | 1449 | vm_flags_t acctflag, struct ucounts **ucounts, |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1450 | int creat_flags, int page_size_log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1451 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1452 | struct inode *inode; |
Al Viro | e68375c | 2018-06-09 09:50:46 -0400 | [diff] [blame] | 1453 | struct vfsmount *mnt; |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1454 | int hstate_idx; |
Al Viro | e68375c | 2018-06-09 09:50:46 -0400 | [diff] [blame] | 1455 | struct file *file; |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1456 | |
| 1457 | hstate_idx = get_hstate_idx(page_size_log); |
| 1458 | if (hstate_idx < 0) |
| 1459 | return ERR_PTR(-ENODEV); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1460 | |
Alexey Gladkov | d7c9e99 | 2021-04-22 14:27:14 +0200 | [diff] [blame] | 1461 | *ucounts = NULL; |
Al Viro | e68375c | 2018-06-09 09:50:46 -0400 | [diff] [blame] | 1462 | mnt = hugetlbfs_vfsmount[hstate_idx]; |
| 1463 | if (!mnt) |
Akinobu Mita | 5bc9859 | 2007-05-06 14:50:18 -0700 | [diff] [blame] | 1464 | return ERR_PTR(-ENOENT); |
| 1465 | |
Mel Gorman | ef1ff6b | 2009-09-23 15:56:05 -0700 | [diff] [blame] | 1466 | if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { |
Alexey Gladkov | d7c9e99 | 2021-04-22 14:27:14 +0200 | [diff] [blame] | 1467 | *ucounts = current_ucounts(); |
| 1468 | if (user_shm_lock(size, *ucounts)) { |
David Rientjes | 21a3c27 | 2012-03-21 16:34:13 -0700 | [diff] [blame] | 1469 | task_lock(current); |
Andrew Morton | 9b857d2 | 2014-06-04 16:07:21 -0700 | [diff] [blame] | 1470 | pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n", |
David Rientjes | 21a3c27 | 2012-03-21 16:34:13 -0700 | [diff] [blame] | 1471 | current->comm, current->pid); |
| 1472 | task_unlock(current); |
Hugh Dickins | 353d5c3 | 2009-08-24 16:30:28 +0100 | [diff] [blame] | 1473 | } else { |
Alexey Gladkov | d7c9e99 | 2021-04-22 14:27:14 +0200 | [diff] [blame] | 1474 | *ucounts = NULL; |
Ravikiran G Thirumalai | 2584e51 | 2009-03-31 15:21:26 -0700 | [diff] [blame] | 1475 | return ERR_PTR(-EPERM); |
Hugh Dickins | 353d5c3 | 2009-08-24 16:30:28 +0100 | [diff] [blame] | 1476 | } |
Ravikiran G Thirumalai | 2584e51 | 2009-03-31 15:21:26 -0700 | [diff] [blame] | 1477 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1478 | |
Anatol Pomozov | 39b6525 | 2012-09-12 20:11:55 -0700 | [diff] [blame] | 1479 | file = ERR_PTR(-ENOSPC); |
Al Viro | e68375c | 2018-06-09 09:50:46 -0400 | [diff] [blame] | 1480 | inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1481 | if (!inode) |
Al Viro | e68375c | 2018-06-09 09:50:46 -0400 | [diff] [blame] | 1482 | goto out; |
Stephen Smalley | e1832f2 | 2015-08-06 15:46:55 -0700 | [diff] [blame] | 1483 | if (creat_flags == HUGETLB_SHMFS_INODE) |
| 1484 | inode->i_flags |= S_PRIVATE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1485 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1486 | inode->i_size = size; |
Miklos Szeredi | 6d6b77f | 2011-10-28 14:13:28 +0200 | [diff] [blame] | 1487 | clear_nlink(inode); |
Dave Hansen | ce8d2cd | 2007-10-16 23:31:13 -0700 | [diff] [blame] | 1488 | |
Mike Kravetz | 33b8f84 | 2021-02-24 12:09:54 -0800 | [diff] [blame] | 1489 | if (!hugetlb_reserve_pages(inode, 0, |
Al Viro | e68375c | 2018-06-09 09:50:46 -0400 | [diff] [blame] | 1490 | size >> huge_page_shift(hstate_inode(inode)), NULL, |
| 1491 | acctflag)) |
| 1492 | file = ERR_PTR(-ENOMEM); |
| 1493 | else |
| 1494 | file = alloc_file_pseudo(inode, mnt, name, O_RDWR, |
| 1495 | &hugetlbfs_file_operations); |
| 1496 | if (!IS_ERR(file)) |
| 1497 | return file; |
Dave Hansen | ce8d2cd | 2007-10-16 23:31:13 -0700 | [diff] [blame] | 1498 | |
David Gibson | b45b5bd | 2006-03-22 00:08:55 -0800 | [diff] [blame] | 1499 | iput(inode); |
Al Viro | e68375c | 2018-06-09 09:50:46 -0400 | [diff] [blame] | 1500 | out: |
Alexey Gladkov | d7c9e99 | 2021-04-22 14:27:14 +0200 | [diff] [blame] | 1501 | if (*ucounts) { |
| 1502 | user_shm_unlock(size, *ucounts); |
| 1503 | *ucounts = NULL; |
Hugh Dickins | 353d5c3 | 2009-08-24 16:30:28 +0100 | [diff] [blame] | 1504 | } |
Anatol Pomozov | 39b6525 | 2012-09-12 20:11:55 -0700 | [diff] [blame] | 1505 | return file; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1506 | } |
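/*
 * Usage sketch for hugetlb_file_setup(), modelled on the SHM_HUGETLB
 * path in ipc/shm.c; local names are illustrative.  Note the caller
 * rounds size up to the huge page size first, per the alignment
 * comment above the function:
 *
 *	struct hstate *hs = hstate_sizelog(page_size_log);
 *	struct ucounts *ucounts;
 *	struct file *file;
 *	size_t hugesize;
 *
 *	if (!hs)
 *		return -EINVAL;
 *	hugesize = ALIGN(size, huge_page_size(hs));
 *	file = hugetlb_file_setup(name, hugesize, acctflag, &ucounts,
 *				  HUGETLB_SHMFS_INODE, page_size_log);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */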
| 1507 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1508 | static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h) |
| 1509 | { |
| 1510 | struct fs_context *fc; |
| 1511 | struct vfsmount *mnt; |
| 1512 | |
| 1513 | fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT); |
| 1514 | if (IS_ERR(fc)) { |
| 1515 | mnt = ERR_CAST(fc); |
| 1516 | } else { |
| 1517 | struct hugetlbfs_fs_context *ctx = fc->fs_private; |
| 1518 | ctx->hstate = h; |
| 1519 | mnt = fc_mount(fc); |
| 1520 | put_fs_context(fc); |
| 1521 | } |
| 1522 | if (IS_ERR(mnt)) |
Miaohe Lin | a25fddc | 2021-02-24 12:10:14 -0800 | [diff] [blame] | 1523 | pr_err("Cannot mount internal hugetlbfs for page size %luK\n", |
| 1524 | huge_page_size(h) >> 10); |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1525 | return mnt; |
| 1526 | } |
| 1527 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1528 | static int __init init_hugetlbfs_fs(void) |
| 1529 | { |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1530 | struct vfsmount *mnt; |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1531 | struct hstate *h; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1532 | int error; |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1533 | int i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1534 | |
Nishanth Aravamudan | 457c1b2 | 2014-05-06 12:50:00 -0700 | [diff] [blame] | 1535 | if (!hugepages_supported()) { |
Andrew Morton | 9b857d2 | 2014-06-04 16:07:21 -0700 | [diff] [blame] | 1536 | pr_info("disabling because there are no supported hugepage sizes\n"); |
Nishanth Aravamudan | 457c1b2 | 2014-05-06 12:50:00 -0700 | [diff] [blame] | 1537 | return -ENOTSUPP; |
| 1538 | } |
| 1539 | |
Hillf Danton | d1d5e05ff | 2012-03-21 16:34:15 -0700 | [diff] [blame] | 1540 | error = -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1541 | hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", |
| 1542 | sizeof(struct hugetlbfs_inode_info), |
Vladimir Davydov | 5d09705 | 2016-01-14 15:18:21 -0800 | [diff] [blame] | 1543 | 0, SLAB_ACCOUNT, init_once); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1544 | if (hugetlbfs_inode_cachep == NULL) |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1545 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1546 | |
| 1547 | error = register_filesystem(&hugetlbfs_fs_type); |
| 1548 | if (error) |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1549 | goto out_free; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1550 | |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1551 | /* default hstate mount is required */ |
Miaohe Lin | 3b2275a | 2021-02-24 12:10:04 -0800 | [diff] [blame] | 1552 | mnt = mount_one_hugetlbfs(&default_hstate); |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1553 | if (IS_ERR(mnt)) { |
| 1554 | error = PTR_ERR(mnt); |
| 1555 | goto out_unreg; |
| 1556 | } |
| 1557 | hugetlbfs_vfsmount[default_hstate_idx] = mnt; |
| 1558 | |
| 1559 | /* other hstates are optional */ |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1560 | i = 0; |
| 1561 | for_each_hstate(h) { |
Jan Stancek | 15f0ec9 | 2020-01-03 18:37:18 +0100 | [diff] [blame] | 1562 | if (i == default_hstate_idx) { |
| 1563 | i++; |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1564 | continue; |
Jan Stancek | 15f0ec9 | 2020-01-03 18:37:18 +0100 | [diff] [blame] | 1565 | } |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1566 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1567 | mnt = mount_one_hugetlbfs(h); |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1568 | if (IS_ERR(mnt)) |
| 1569 | hugetlbfs_vfsmount[i] = NULL; |
| 1570 | else |
| 1571 | hugetlbfs_vfsmount[i] = mnt; |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1572 | i++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1573 | } |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1574 | |
| 1575 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1576 | |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1577 | out_unreg: |
| 1578 | (void)unregister_filesystem(&hugetlbfs_fs_type); |
| 1579 | out_free: |
Hillf Danton | d1d5e05ff | 2012-03-21 16:34:15 -0700 | [diff] [blame] | 1580 | kmem_cache_destroy(hugetlbfs_inode_cachep); |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1581 | out: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1582 | return error; |
| 1583 | } |
Paul Gortmaker | 3e89e1c | 2016-01-14 15:21:52 -0800 | [diff] [blame] | 1584 | fs_initcall(init_hugetlbfs_fs) |