/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

struct shmem_xattr {
	struct list_head list;	/* anchored by shmem_inode_info->xattr_list */
	char *name;		/* xattr name */
	size_t size;
	char value[0];
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

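/*
 * shmem_getpage_gfp() is the core lookup/allocation routine, defined
 * below; the shmem_getpage() wrapper simply supplies the mapping's
 * default gfp mask.
 */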
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

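/*
 * Return freed pages to the superblock's used_blocks counter and drop
 * the inode's i_blocks accordingly; a no-op on unlimited (max_blocks
 * == 0) mounts, which do not account blocks at all.
 */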
static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		percpu_counter_add(&sbinfo->used_blocks, -pages);
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
	}
}

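/*
 * Reserve or release one inode against the mount's inode limit;
 * both are no-ops on mounts with no inode limit (max_inodes == 0).
 */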
static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}

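/*
 * Interim helpers for the small i_direct[] array of swap entries:
 * only indices below SHMEM_NR_DIRECT can be stored or looked up here,
 * while the conversion to keeping swap entries in the mapping's radix
 * tree is in progress.
 */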
static void shmem_put_swap(struct shmem_inode_info *info, pgoff_t index,
			   swp_entry_t swap)
{
	if (index < SHMEM_NR_DIRECT)
		info->i_direct[index] = swap;
}

static swp_entry_t shmem_get_swap(struct shmem_inode_info *info, pgoff_t index)
{
	return (index < SHMEM_NR_DIRECT) ?
		info->i_direct[index] : (swp_entry_t){0};
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item = NULL;

	VM_BUG_ON(!expected);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot)
		item = radix_tree_deref_slot_protected(pslot,
							&mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	if (replacement)
		radix_tree_replace_slot(pslot, replacement);
	else
		radix_tree_delete(&mapping->page_tree, index);
	return 0;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapBacked(page));

	error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
	if (error)
		goto out;
	if (!expected)
		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (!error) {
		page_cache_get(page);
		page->mapping = mapping;
		page->index = index;

		spin_lock_irq(&mapping->tree_lock);
		if (!expected)
			error = radix_tree_insert(&mapping->page_tree,
							index, page);
		else
			error = shmem_radix_tree_replace(mapping, index,
							expected, page);
		if (!error) {
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			__inc_zone_page_state(page, NR_SHMEM);
			spin_unlock_irq(&mapping->tree_lock);
		} else {
			page->mapping = NULL;
			spin_unlock_irq(&mapping->tree_lock);
			page_cache_release(page);
		}
		if (!expected)
			radix_tree_preload_end();
	}
	if (error)
		mem_cgroup_uncharge_cache_page(page);
out:
	return error;
}

/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, indices, start, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_exceptional_entry(page))
				goto export;
			/* radix_tree_deref_retry(page) */
			goto restart;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = indices[i];
		pages[ret] = page;
		ret++;
	}
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();
	return ret;
}

/*
 * Lockless lookup of swap entry in radix tree, avoiding refcount on pages.
 */
static pgoff_t shmem_find_swap(struct address_space *mapping, void *radswap)
{
	void **slots[PAGEVEC_SIZE];
	pgoff_t indices[PAGEVEC_SIZE];
	unsigned int nr_found;

restart:
	nr_found = 1;
	indices[0] = -1;
	while (nr_found) {
		pgoff_t index = indices[nr_found - 1] + 1;
		unsigned int i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
					slots, indices, index, PAGEVEC_SIZE);
		for (i = 0; i < nr_found; i++) {
			void *item = radix_tree_deref_slot(slots[i]);
			if (radix_tree_deref_retry(item)) {
				rcu_read_unlock();
				goto restart;
			}
			if (item == radswap) {
				rcu_read_unlock();
				return indices[i];
			}
		}
		rcu_read_unlock();
		cond_resched();
	}
	return -1;
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}

/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_pagevec_release(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
	pagevec_release(pvec);
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 */
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));

	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		shmem_pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, partial, PAGE_CACHE_SIZE);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	index = start;
	for ( ; ; ) {
		cond_resched();
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && indices[0] > end) {
			shmem_pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		shmem_pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

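/*
 * Apply attribute changes; size changes are handled here directly,
 * shrinking via shmem_truncate_range() with the affected range unmapped
 * before and after (see the comment on racily COWed pages below).
 */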
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

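/*
 * Final teardown of a tmpfs inode: truncate away all pages and swap,
 * drop it from the swaplist, free its xattrs, and release the inode
 * reservation.
 */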
static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr, *nxattr;

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
		kfree(xattr->name);
		kfree(xattr);
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	end_writeback(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page *page)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	int error;

	radswap = swp_to_radix_entry(swap);
	index = shmem_find_swap(mapping, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	error = shmem_add_to_page_cache(page, mapping, index,
						GFP_NOWAIT, radswap);
	/* which does mem_cgroup_uncharge_cache_page on error */

	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(page);
		set_page_dirty(page);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 * shmem_add_to_page_cache() will be called with GFP_NOWAIT.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (!info->swapped) {
			spin_lock(&info->lock);
			if (!info->swapped)
				list_del_init(&info->swaplist);
			spin_unlock(&info->lock);
		}
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, page);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (!found)
		mem_cgroup_uncharge_cache_page(page);
	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t swap, oswap;
	struct address_space *mapping;
	pgoff_t index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * Disable even the toy swapping implementation, while we convert
	 * functions one by one to having swap entries in the radix tree.
	 */
	if (index < ULONG_MAX)
		goto redirty;

	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now because we cannot take
	 * mutex while holding spinlock, and must do so before the page
	 * is moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've taken the spinlock, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under both locks.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	spin_lock(&info->lock);
	mutex_unlock(&shmem_swaplist_mutex);

	oswap = shmem_get_swap(info, index);
	if (oswap.val) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		free_swap_and_cache(oswap);
		shmem_put_swap(info, index, (swp_entry_t){0});
		info->swapped--;
	}
	shmem_recalc_inode(inode);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		delete_from_page_cache(page);
		shmem_put_swap(info, index, swap);
		info->swapped++;
		swap_shmem_alloc(swap);
		spin_unlock(&info->lock);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	spin_unlock(&info->lock);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 820 | static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, |
| 821 | struct shmem_inode_info *info, pgoff_t index) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 822 | { |
Lee Schermerhorn | 52cd3b0 | 2008-04-28 02:13:16 -0700 | [diff] [blame] | 823 | struct mempolicy mpol, *spol; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 824 | struct vm_area_struct pvma; |
| 825 | |
Lee Schermerhorn | 52cd3b0 | 2008-04-28 02:13:16 -0700 | [diff] [blame] | 826 | spol = mpol_cond_copy(&mpol, |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 827 | mpol_shared_policy_lookup(&info->policy, index)); |
Lee Schermerhorn | 52cd3b0 | 2008-04-28 02:13:16 -0700 | [diff] [blame] | 828 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 829 | /* Create a pseudo vma that just contains the policy */ |
Hugh Dickins | c4cc6d0 | 2008-02-04 22:28:40 -0800 | [diff] [blame] | 830 | pvma.vm_start = 0; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 831 | pvma.vm_pgoff = index; |
Hugh Dickins | c4cc6d0 | 2008-02-04 22:28:40 -0800 | [diff] [blame] | 832 | pvma.vm_ops = NULL; |
Lee Schermerhorn | 52cd3b0 | 2008-04-28 02:13:16 -0700 | [diff] [blame] | 833 | pvma.vm_policy = spol; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 834 | return swapin_readahead(swap, gfp, &pvma, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 835 | } |
| 836 | |
Hugh Dickins | 02098fe | 2008-02-04 22:28:42 -0800 | [diff] [blame] | 837 | static struct page *shmem_alloc_page(gfp_t gfp, |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 838 | struct shmem_inode_info *info, pgoff_t index) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 839 | { |
| 840 | struct vm_area_struct pvma; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 841 | |
Hugh Dickins | c4cc6d0 | 2008-02-04 22:28:40 -0800 | [diff] [blame] | 842 | /* Create a pseudo vma that just contains the policy */ |
| 843 | pvma.vm_start = 0; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 844 | pvma.vm_pgoff = index; |
Hugh Dickins | c4cc6d0 | 2008-02-04 22:28:40 -0800 | [diff] [blame] | 845 | pvma.vm_ops = NULL; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 846 | pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index); |
Lee Schermerhorn | 52cd3b0 | 2008-04-28 02:13:16 -0700 | [diff] [blame] | 847 | |
| 848 | /* |
| 849 | * alloc_page_vma() will drop the shared policy reference |
| 850 | */ |
| 851 | return alloc_page_vma(gfp, &pvma, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 852 | } |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 853 | #else /* !CONFIG_NUMA */ |
| 854 | #ifdef CONFIG_TMPFS |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 855 | static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 856 | { |
| 857 | } |
| 858 | #endif /* CONFIG_TMPFS */ |
| 859 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 860 | static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, |
| 861 | struct shmem_inode_info *info, pgoff_t index) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 862 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 863 | return swapin_readahead(swap, gfp, NULL, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 864 | } |
| 865 | |
Hugh Dickins | 02098fe | 2008-02-04 22:28:42 -0800 | [diff] [blame] | 866 | static inline struct page *shmem_alloc_page(gfp_t gfp, |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 867 | struct shmem_inode_info *info, pgoff_t index) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 868 | { |
Hugh Dickins | e84e2e1 | 2007-11-28 18:55:10 +0000 | [diff] [blame] | 869 | return alloc_page(gfp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 870 | } |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 871 | #endif /* CONFIG_NUMA */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 872 | |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 873 | #if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS) |
| 874 | static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) |
| 875 | { |
| 876 | return NULL; |
| 877 | } |
| 878 | #endif |
| 879 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 880 | /* |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 881 | * shmem_getpage_gfp - find page in cache, or get from swap, or allocate |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 882 | * |
| 883 | * If we allocate a new one we do not mark it dirty. That's up to the |
| 884 | * vm. If we swap it in we mark it dirty since we also free the swap |
| 885 | * entry since a page cannot live in both the swap and page cache |
| 886 | */ |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 887 | static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 888 | struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 889 | { |
| 890 | struct address_space *mapping = inode->i_mapping; |
| 891 | struct shmem_inode_info *info = SHMEM_I(inode); |
| 892 | struct shmem_sb_info *sbinfo; |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 893 | struct page *page; |
Shaohua Li | ff36b801 | 2010-08-09 17:19:06 -0700 | [diff] [blame] | 894 | struct page *prealloc_page = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 895 | swp_entry_t swap; |
| 896 | int error; |
| 897 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 898 | if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 899 | return -EFBIG; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 900 | repeat: |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 901 | page = find_lock_page(mapping, index); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 902 | if (page) { |
Hugh Dickins | b409f9f | 2008-02-04 22:28:54 -0800 | [diff] [blame] | 903 | /* |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 904 | * Once we can get the page lock, it must be uptodate: |
| 905 | * if there were an error in reading back from swap, |
| 906 | * the page would not be inserted into the filecache. |
Hugh Dickins | b409f9f | 2008-02-04 22:28:54 -0800 | [diff] [blame] | 907 | */ |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 908 | BUG_ON(!PageUptodate(page)); |
| 909 | goto done; |
| 910 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 911 | |
| 912 | /* |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 913 | * Try to preload while we can wait, to not make a habit of |
| 914 | * draining atomic reserves; but don't latch on to this cpu. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 915 | */ |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 916 | error = radix_tree_preload(gfp & GFP_RECLAIM_MASK); |
| 917 | if (error) |
| 918 | goto out; |
| 919 | radix_tree_preload_end(); |
| 920 | |
| 921 | if (sgp != SGP_READ && !prealloc_page) { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 922 | prealloc_page = shmem_alloc_page(gfp, info, index); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 923 | if (prealloc_page) { |
| 924 | SetPageSwapBacked(prealloc_page); |
| 925 | if (mem_cgroup_cache_charge(prealloc_page, |
| 926 | current->mm, GFP_KERNEL)) { |
| 927 | page_cache_release(prealloc_page); |
| 928 | prealloc_page = NULL; |
Shaohua Li | ff36b801 | 2010-08-09 17:19:06 -0700 | [diff] [blame] | 929 | } |
| 930 | } |
Hugh Dickins | b409f9f | 2008-02-04 22:28:54 -0800 | [diff] [blame] | 931 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 932 | |
| 933 | spin_lock(&info->lock); |
| 934 | shmem_recalc_inode(inode); |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 935 | swap = shmem_get_swap(info, index); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 936 | if (swap.val) { |
| 937 | /* Look it up and read it in.. */ |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 938 | page = lookup_swap_cache(swap); |
| 939 | if (!page) { |
Christoph Lameter | f8891e5 | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 940 | spin_unlock(&info->lock); |
Ying Han | 456f998 | 2011-05-26 16:25:38 -0700 | [diff] [blame] | 941 | /* here we actually do the io */ |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 942 | if (fault_type) |
| 943 | *fault_type |= VM_FAULT_MAJOR; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 944 | page = shmem_swapin(swap, gfp, info, index); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 945 | if (!page) { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 946 | swp_entry_t nswap = shmem_get_swap(info, index); |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 947 | if (nswap.val == swap.val) { |
| 948 | error = -ENOMEM; |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 949 | goto out; |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 950 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 951 | goto repeat; |
| 952 | } |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 953 | wait_on_page_locked(page); |
| 954 | page_cache_release(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 955 | goto repeat; |
| 956 | } |
| 957 | |
| 958 | /* We have to do this with page locked to prevent races */ |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 959 | if (!trylock_page(page)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 960 | spin_unlock(&info->lock); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 961 | wait_on_page_locked(page); |
| 962 | page_cache_release(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 963 | goto repeat; |
| 964 | } |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 965 | if (PageWriteback(page)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 966 | spin_unlock(&info->lock); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 967 | wait_on_page_writeback(page); |
| 968 | unlock_page(page); |
| 969 | page_cache_release(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 970 | goto repeat; |
| 971 | } |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 972 | if (!PageUptodate(page)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 973 | spin_unlock(&info->lock); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 974 | unlock_page(page); |
| 975 | page_cache_release(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 976 | error = -EIO; |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 977 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 978 | } |
| 979 | |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 980 | error = add_to_page_cache_locked(page, mapping, |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 981 | index, GFP_NOWAIT); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 982 | if (error) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 983 | spin_unlock(&info->lock); |
Hugh Dickins | 8236955 | 2008-02-07 00:14:22 -0800 | [diff] [blame] | 984 | if (error == -ENOMEM) { |
Daisuke Nishimura | ae3abae | 2009-04-30 15:08:19 -0700 | [diff] [blame] | 985 | /* |
| 986 | * reclaim from the proper memory cgroup and |
| 987 | * call memcg's OOM if needed. |
| 988 | */ |
| 989 | error = mem_cgroup_shmem_charge_fallback( |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 990 | page, current->mm, gfp); |
KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 991 | if (error) { |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 992 | unlock_page(page); |
| 993 | page_cache_release(page); |
| 994 | goto out; |
KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 995 | } |
Hugh Dickins | 8236955 | 2008-02-07 00:14:22 -0800 | [diff] [blame] | 996 | } |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 997 | unlock_page(page); |
| 998 | page_cache_release(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 999 | goto repeat; |
| 1000 | } |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1001 | |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1002 | delete_from_swap_cache(page); |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1003 | shmem_put_swap(info, index, (swp_entry_t){0}); |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 1004 | info->swapped--; |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1005 | spin_unlock(&info->lock); |
| 1006 | set_page_dirty(page); |
| 1007 | swap_free(swap); |
| 1008 | |
| 1009 | } else if (sgp == SGP_READ) { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1010 | page = find_get_page(mapping, index); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1011 | if (page && !trylock_page(page)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1012 | spin_unlock(&info->lock); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1013 | wait_on_page_locked(page); |
| 1014 | page_cache_release(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1015 | goto repeat; |
| 1016 | } |
| 1017 | spin_unlock(&info->lock); |
Hugh Dickins | e83c32e | 2011-07-25 17:12:35 -0700 | [diff] [blame] | 1018 | |
| 1019 | } else if (prealloc_page) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1020 | sbinfo = SHMEM_SB(inode->i_sb); |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 1021 | if (sbinfo->max_blocks) { |
Hugh Dickins | fc5da22 | 2011-04-14 15:22:07 -0700 | [diff] [blame] | 1022 | if (percpu_counter_compare(&sbinfo->used_blocks, |
| 1023 | sbinfo->max_blocks) >= 0 || |
Hugh Dickins | 59a16ea | 2011-05-11 15:13:38 -0700 | [diff] [blame] | 1024 | shmem_acct_block(info->flags)) |
| 1025 | goto nospace; |
Tim Chen | 7e49629 | 2010-08-09 17:19:05 -0700 | [diff] [blame] | 1026 | percpu_counter_inc(&sbinfo->used_blocks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1027 | inode->i_blocks += BLOCKS_PER_PAGE; |
Hugh Dickins | 59a16ea | 2011-05-11 15:13:38 -0700 | [diff] [blame] | 1028 | } else if (shmem_acct_block(info->flags)) |
| 1029 | goto nospace; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1030 | |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1031 | page = prealloc_page; |
| 1032 | prealloc_page = NULL; |
KAMEZAWA Hiroyuki | 69029cd | 2008-07-25 01:47:14 -0700 | [diff] [blame] | 1033 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1034 | swap = shmem_get_swap(info, index); |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 1035 | if (swap.val) |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1036 | mem_cgroup_uncharge_cache_page(page); |
| 1037 | else |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 1038 | error = add_to_page_cache_lru(page, mapping, |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1039 | index, GFP_NOWAIT); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1040 | /* |
| 1041 | * If add_to_page_cache_lru() fails, the memcg |
| 1042 | * uncharge will be done automatically. |
| 1043 | */ |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 1044 | if (swap.val || error) { |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1045 | shmem_unacct_blocks(info->flags, 1); |
| 1046 | shmem_free_blocks(inode, 1); |
Hugh Dickins | 59a16ea | 2011-05-11 15:13:38 -0700 | [diff] [blame] | 1047 | spin_unlock(&info->lock); |
| 1048 | page_cache_release(page); |
| 1049 | goto repeat; |
| 1050 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1051 | |
| 1052 | info->alloced++; |
| 1053 | spin_unlock(&info->lock); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1054 | clear_highpage(page); |
| 1055 | flush_dcache_page(page); |
| 1056 | SetPageUptodate(page); |
Hugh Dickins | a0ee5ec | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1057 | if (sgp == SGP_DIRTY) |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1058 | set_page_dirty(page); |
| 1059 | |
Hugh Dickins | e83c32e | 2011-07-25 17:12:35 -0700 | [diff] [blame] | 1060 | } else { |
| 1061 | spin_unlock(&info->lock); |
| 1062 | error = -ENOMEM; |
| 1063 | goto out; |
Hugh Dickins | 59a16ea | 2011-05-11 15:13:38 -0700 | [diff] [blame] | 1064 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1065 | done: |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1066 | *pagep = page; |
Shaohua Li | ff36b801 | 2010-08-09 17:19:06 -0700 | [diff] [blame] | 1067 | error = 0; |
Shaohua Li | ff36b801 | 2010-08-09 17:19:06 -0700 | [diff] [blame] | 1068 | out: |
| 1069 | if (prealloc_page) { |
| 1070 | mem_cgroup_uncharge_cache_page(prealloc_page); |
| 1071 | page_cache_release(prealloc_page); |
| 1072 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1073 | return error; |
Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 1074 | |
Hugh Dickins | 27d54b3 | 2008-02-04 22:28:43 -0800 | [diff] [blame] | 1075 | nospace: |
Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1076 | /* |
| 1077 | * Perhaps the page was brought in from swap between find_lock_page |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1078 | * and taking info->lock? We allow for that at add_to_page_cache_lru, |
Nick Piggin | 83c5407 | 2007-07-19 01:47:05 -0700 | [diff] [blame] | 1079 | * but must also avoid reporting a spurious ENOSPC while working on a |
Hugh Dickins | 9276aad | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 1080 | * full tmpfs. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1081 | */ |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1082 | page = find_get_page(mapping, index); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1083 | spin_unlock(&info->lock); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1084 | if (page) { |
| 1085 | page_cache_release(page); |
| 1086 | goto repeat; |
Josef "Jeff" Sipek | d3ac7f8 | 2006-12-08 02:36:44 -0800 | [diff] [blame] | 1087 | } |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1088 | error = -ENOSPC; |
Hugh Dickins | e83c32e | 2011-07-25 17:12:35 -0700 | [diff] [blame] | 1089 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1090 | } |
| 1091 | |
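/*
 * shmem_fault() is the ->fault handler installed (via shmem_vm_ops) by
 * shmem_mmap() below.  It refuses faults beyond i_size with SIGBUS,
 * otherwise looks up or allocates the page through shmem_getpage() with
 * SGP_CACHE and returns it still locked (VM_FAULT_LOCKED).  When the page
 * had to be read back from swap, shmem_getpage() sets VM_FAULT_MAJOR and
 * the major-fault counters are bumped here.
 */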
| 1092 | static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
| 1093 | { |
| 1094 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; |
| 1095 | int error; |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 1096 | int ret = VM_FAULT_LOCKED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1097 | |
| 1098 | if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode)) |
| 1099 | return VM_FAULT_SIGBUS; |
| 1100 | |
| 1101 | error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); |
| 1102 | if (error) |
| 1103 | return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 1104 | |
Ying Han | 456f998 | 2011-05-26 16:25:38 -0700 | [diff] [blame] | 1105 | if (ret & VM_FAULT_MAJOR) { |
| 1106 | count_vm_event(PGMAJFAULT); |
| 1107 | mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); |
| 1108 | } |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 1109 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1110 | } |
| 1111 | |
| 1112 | #ifdef CONFIG_NUMA |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1113 | static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1114 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1115 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; |
| 1116 | return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1117 | } |
| 1118 | |
Adrian Bunk | d8dc74f | 2007-10-16 01:26:26 -0700 | [diff] [blame] | 1119 | static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, |
| 1120 | unsigned long addr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1121 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1122 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; |
| 1123 | pgoff_t index; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1124 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1125 | index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; |
| 1126 | return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1127 | } |
| 1128 | #endif |
| 1129 | |
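/*
 * shmem_lock() backs the SysV shmctl(SHM_LOCK)/shmctl(SHM_UNLOCK) requests
 * from the ipc/shm code, with RLIMIT_MEMLOCK accounting done by
 * user_shm_lock().  Locking marks the whole mapping unevictable so its
 * pages are kept off the reclaim LRUs; unlocking clears the flag and
 * rescans the mapping to put the pages back on the normal LRUs.
 */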
| 1130 | int shmem_lock(struct file *file, int lock, struct user_struct *user) |
| 1131 | { |
Josef "Jeff" Sipek | d3ac7f8 | 2006-12-08 02:36:44 -0800 | [diff] [blame] | 1132 | struct inode *inode = file->f_path.dentry->d_inode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1133 | struct shmem_inode_info *info = SHMEM_I(inode); |
| 1134 | int retval = -ENOMEM; |
| 1135 | |
| 1136 | spin_lock(&info->lock); |
| 1137 | if (lock && !(info->flags & VM_LOCKED)) { |
| 1138 | if (!user_shm_lock(inode->i_size, user)) |
| 1139 | goto out_nomem; |
| 1140 | info->flags |= VM_LOCKED; |
Lee Schermerhorn | 89e004ea | 2008-10-18 20:26:43 -0700 | [diff] [blame] | 1141 | mapping_set_unevictable(file->f_mapping); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1142 | } |
| 1143 | if (!lock && (info->flags & VM_LOCKED) && user) { |
| 1144 | user_shm_unlock(inode->i_size, user); |
| 1145 | info->flags &= ~VM_LOCKED; |
Lee Schermerhorn | 89e004ea | 2008-10-18 20:26:43 -0700 | [diff] [blame] | 1146 | mapping_clear_unevictable(file->f_mapping); |
| 1147 | scan_mapping_unevictable_pages(file->f_mapping); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1148 | } |
| 1149 | retval = 0; |
Lee Schermerhorn | 89e004ea | 2008-10-18 20:26:43 -0700 | [diff] [blame] | 1150 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1151 | out_nomem: |
| 1152 | spin_unlock(&info->lock); |
| 1153 | return retval; |
| 1154 | } |
| 1155 | |
Adrian Bunk | 9b83a6a | 2007-02-28 20:11:03 -0800 | [diff] [blame] | 1156 | static int shmem_mmap(struct file *file, struct vm_area_struct *vma) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1157 | { |
| 1158 | file_accessed(file); |
| 1159 | vma->vm_ops = &shmem_vm_ops; |
Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1160 | vma->vm_flags |= VM_CAN_NONLINEAR; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1161 | return 0; |
| 1162 | } |
| 1163 | |
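/*
 * shmem_get_inode() allocates and initialises a tmpfs inode: it first
 * reserves an inode against the sbinfo->max_inodes limit via
 * shmem_reserve_inode(), then fills in ownership, timestamps and the
 * shmem_inode_info, and finally dispatches on the file type to install the
 * right inode/file operations (regular files additionally pick up the
 * shared mempolicy from the superblock).
 */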
Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 1164 | static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, |
| 1165 | int mode, dev_t dev, unsigned long flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1166 | { |
| 1167 | struct inode *inode; |
| 1168 | struct shmem_inode_info *info; |
| 1169 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
| 1170 | |
Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 1171 | if (shmem_reserve_inode(sb)) |
| 1172 | return NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1173 | |
| 1174 | inode = new_inode(sb); |
| 1175 | if (inode) { |
Christoph Hellwig | 85fe402 | 2010-10-23 11:19:54 -0400 | [diff] [blame] | 1176 | inode->i_ino = get_next_ino(); |
Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 1177 | inode_init_owner(inode, dir, mode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1178 | inode->i_blocks = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1179 | inode->i_mapping->backing_dev_info = &shmem_backing_dev_info; |
| 1180 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 1181 | inode->i_generation = get_seconds(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1182 | info = SHMEM_I(inode); |
| 1183 | memset(info, 0, (char *)inode - (char *)info); |
| 1184 | spin_lock_init(&info->lock); |
Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 1185 | info->flags = flags & VM_NORESERVE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1186 | INIT_LIST_HEAD(&info->swaplist); |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 1187 | INIT_LIST_HEAD(&info->xattr_list); |
Al Viro | 72c0490 | 2009-06-24 16:58:48 -0400 | [diff] [blame] | 1188 | cache_no_acl(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1189 | |
| 1190 | switch (mode & S_IFMT) { |
| 1191 | default: |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 1192 | inode->i_op = &shmem_special_inode_operations; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1193 | init_special_inode(inode, mode, dev); |
| 1194 | break; |
| 1195 | case S_IFREG: |
Hugh Dickins | 14fcc23 | 2008-07-28 15:46:19 -0700 | [diff] [blame] | 1196 | inode->i_mapping->a_ops = &shmem_aops; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1197 | inode->i_op = &shmem_inode_operations; |
| 1198 | inode->i_fop = &shmem_file_operations; |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1199 | mpol_shared_policy_init(&info->policy, |
| 1200 | shmem_get_sbmpol(sbinfo)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1201 | break; |
| 1202 | case S_IFDIR: |
Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 1203 | inc_nlink(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1204 | /* Some things misbehave if size == 0 on a directory */ |
| 1205 | inode->i_size = 2 * BOGO_DIRENT_SIZE; |
| 1206 | inode->i_op = &shmem_dir_inode_operations; |
| 1207 | inode->i_fop = &simple_dir_operations; |
| 1208 | break; |
| 1209 | case S_IFLNK: |
| 1210 | /* |
| 1211 | * Must not load anything into the rbtree: |
| 1212 | * mpol_free_shared_policy will not be called. |
| 1213 | */ |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1214 | mpol_shared_policy_init(&info->policy, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1215 | break; |
| 1216 | } |
Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 1217 | } else |
| 1218 | shmem_free_inode(sb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1219 | return inode; |
| 1220 | } |
| 1221 | |
| 1222 | #ifdef CONFIG_TMPFS |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 1223 | static const struct inode_operations shmem_symlink_inode_operations; |
| 1224 | static const struct inode_operations shmem_symlink_inline_operations; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1225 | |
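/*
 * The write path is a thin wrapper around shmem_getpage():
 * shmem_write_begin() returns the target page locked (allocating it with
 * SGP_WRITE if necessary), and shmem_write_end() extends i_size when the
 * write went past the old end of file, dirties the page, then unlocks and
 * releases it.
 */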
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1226 | static int |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 1227 | shmem_write_begin(struct file *file, struct address_space *mapping, |
| 1228 | loff_t pos, unsigned len, unsigned flags, |
| 1229 | struct page **pagep, void **fsdata) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1230 | { |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 1231 | struct inode *inode = mapping->host; |
| 1232 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 1233 | return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL); |
| 1234 | } |
| 1235 | |
| 1236 | static int |
| 1237 | shmem_write_end(struct file *file, struct address_space *mapping, |
| 1238 | loff_t pos, unsigned len, unsigned copied, |
| 1239 | struct page *page, void *fsdata) |
| 1240 | { |
| 1241 | struct inode *inode = mapping->host; |
| 1242 | |
Hugh Dickins | d360244 | 2008-02-04 22:28:44 -0800 | [diff] [blame] | 1243 | if (pos + copied > inode->i_size) |
| 1244 | i_size_write(inode, pos + copied); |
| 1245 | |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 1246 | set_page_dirty(page); |
Wu Fengguang | 6746aff | 2009-09-16 11:50:14 +0200 | [diff] [blame] | 1247 | unlock_page(page); |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 1248 | page_cache_release(page); |
| 1249 | |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 1250 | return copied; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1251 | } |
| 1252 | |
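/*
 * do_shmem_file_read() copies out one page per loop iteration through the
 * read_actor_t callback.  With SGP_READ, holes come back with no page and
 * are satisfied from ZERO_PAGE(0); for reads issued under KERNEL_DS (e.g.
 * a stacking filesystem reading through tmpfs) SGP_DIRTY is used instead,
 * so hole pages really are allocated and marked dirty, as the comment
 * below explains.  i_size is re-checked after each lookup because reads
 * run without i_mutex and may race with truncate.
 */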
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1253 | static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor) |
| 1254 | { |
Josef "Jeff" Sipek | d3ac7f8 | 2006-12-08 02:36:44 -0800 | [diff] [blame] | 1255 | struct inode *inode = filp->f_path.dentry->d_inode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1256 | struct address_space *mapping = inode->i_mapping; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1257 | pgoff_t index; |
| 1258 | unsigned long offset; |
Hugh Dickins | a0ee5ec | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1259 | enum sgp_type sgp = SGP_READ; |
| 1260 | |
| 1261 | /* |
| 1262 | * Might this read be for a stacking filesystem? Then when reading |
| 1263 | * holes of a sparse file, we actually need to allocate those pages, |
| 1264 | * and even mark them dirty, so it cannot exceed the max_blocks limit. |
| 1265 | */ |
| 1266 | if (segment_eq(get_fs(), KERNEL_DS)) |
| 1267 | sgp = SGP_DIRTY; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1268 | |
| 1269 | index = *ppos >> PAGE_CACHE_SHIFT; |
| 1270 | offset = *ppos & ~PAGE_CACHE_MASK; |
| 1271 | |
| 1272 | for (;;) { |
| 1273 | struct page *page = NULL; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1274 | pgoff_t end_index; |
| 1275 | unsigned long nr, ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1276 | loff_t i_size = i_size_read(inode); |
| 1277 | |
| 1278 | end_index = i_size >> PAGE_CACHE_SHIFT; |
| 1279 | if (index > end_index) |
| 1280 | break; |
| 1281 | if (index == end_index) { |
| 1282 | nr = i_size & ~PAGE_CACHE_MASK; |
| 1283 | if (nr <= offset) |
| 1284 | break; |
| 1285 | } |
| 1286 | |
Hugh Dickins | a0ee5ec | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1287 | desc->error = shmem_getpage(inode, index, &page, sgp, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1288 | if (desc->error) { |
| 1289 | if (desc->error == -EINVAL) |
| 1290 | desc->error = 0; |
| 1291 | break; |
| 1292 | } |
Hugh Dickins | d360244 | 2008-02-04 22:28:44 -0800 | [diff] [blame] | 1293 | if (page) |
| 1294 | unlock_page(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1295 | |
| 1296 | /* |
| 1297 | * We must re-check i_size after getting the page, since reads (unlike writes) |
Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 1298 | * are called without i_mutex protection against truncate |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1299 | */ |
| 1300 | nr = PAGE_CACHE_SIZE; |
| 1301 | i_size = i_size_read(inode); |
| 1302 | end_index = i_size >> PAGE_CACHE_SHIFT; |
| 1303 | if (index == end_index) { |
| 1304 | nr = i_size & ~PAGE_CACHE_MASK; |
| 1305 | if (nr <= offset) { |
| 1306 | if (page) |
| 1307 | page_cache_release(page); |
| 1308 | break; |
| 1309 | } |
| 1310 | } |
| 1311 | nr -= offset; |
| 1312 | |
| 1313 | if (page) { |
| 1314 | /* |
| 1315 | * If users can be writing to this page using arbitrary |
| 1316 | * virtual addresses, take care about potential aliasing |
| 1317 | * before reading the page on the kernel side. |
| 1318 | */ |
| 1319 | if (mapping_writably_mapped(mapping)) |
| 1320 | flush_dcache_page(page); |
| 1321 | /* |
| 1322 | * Mark the page accessed if we read the beginning. |
| 1323 | */ |
| 1324 | if (!offset) |
| 1325 | mark_page_accessed(page); |
Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 1326 | } else { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1327 | page = ZERO_PAGE(0); |
Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 1328 | page_cache_get(page); |
| 1329 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1330 | |
| 1331 | /* |
| 1332 | * Ok, we have the page, and it's up-to-date, so |
| 1333 | * now we can copy it to user space... |
| 1334 | * |
| 1335 | * The actor routine returns how many bytes were actually used.. |
| 1336 | * NOTE! This may not be the same as how much of a user buffer |
| 1337 | * we filled up (we may be padding etc), so we can only update |
| 1338 | * "pos" here (the actor routine has to update the user buffer |
| 1339 | * pointers and the remaining count). |
| 1340 | */ |
| 1341 | ret = actor(desc, page, offset, nr); |
| 1342 | offset += ret; |
| 1343 | index += offset >> PAGE_CACHE_SHIFT; |
| 1344 | offset &= ~PAGE_CACHE_MASK; |
| 1345 | |
| 1346 | page_cache_release(page); |
| 1347 | if (ret != nr || !desc->count) |
| 1348 | break; |
| 1349 | |
| 1350 | cond_resched(); |
| 1351 | } |
| 1352 | |
| 1353 | *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; |
| 1354 | file_accessed(filp); |
| 1355 | } |
| 1356 | |
Hugh Dickins | bcd78e4 | 2008-07-23 21:27:35 -0700 | [diff] [blame] | 1357 | static ssize_t shmem_file_aio_read(struct kiocb *iocb, |
| 1358 | const struct iovec *iov, unsigned long nr_segs, loff_t pos) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 | { |
Hugh Dickins | bcd78e4 | 2008-07-23 21:27:35 -0700 | [diff] [blame] | 1360 | struct file *filp = iocb->ki_filp; |
| 1361 | ssize_t retval; |
| 1362 | unsigned long seg; |
| 1363 | size_t count; |
| 1364 | loff_t *ppos = &iocb->ki_pos; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1365 | |
Hugh Dickins | bcd78e4 | 2008-07-23 21:27:35 -0700 | [diff] [blame] | 1366 | retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE); |
| 1367 | if (retval) |
| 1368 | return retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1369 | |
Hugh Dickins | bcd78e4 | 2008-07-23 21:27:35 -0700 | [diff] [blame] | 1370 | for (seg = 0; seg < nr_segs; seg++) { |
| 1371 | read_descriptor_t desc; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1372 | |
Hugh Dickins | bcd78e4 | 2008-07-23 21:27:35 -0700 | [diff] [blame] | 1373 | desc.written = 0; |
| 1374 | desc.arg.buf = iov[seg].iov_base; |
| 1375 | desc.count = iov[seg].iov_len; |
| 1376 | if (desc.count == 0) |
| 1377 | continue; |
| 1378 | desc.error = 0; |
| 1379 | do_shmem_file_read(filp, ppos, &desc, file_read_actor); |
| 1380 | retval += desc.written; |
| 1381 | if (desc.error) { |
| 1382 | retval = retval ?: desc.error; |
| 1383 | break; |
| 1384 | } |
| 1385 | if (desc.count > 0) |
| 1386 | break; |
| 1387 | } |
| 1388 | return retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1389 | } |
| 1390 | |
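/*
 * shmem_file_splice_read() closely follows generic_file_splice_read(), but
 * fills any missing pages through shmem_getpage() instead of ->readpage.
 * It gathers up to pipe->buffers contiguous pages into the
 * splice_pipe_desc, trims each partial entry to i_size, and hands the
 * result to splice_to_pipe().  A positive return is the number of bytes
 * spliced and advances *ppos accordingly.
 */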
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1391 | static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, |
| 1392 | struct pipe_inode_info *pipe, size_t len, |
| 1393 | unsigned int flags) |
| 1394 | { |
| 1395 | struct address_space *mapping = in->f_mapping; |
Hugh Dickins | 71f0e07 | 2011-07-25 17:12:33 -0700 | [diff] [blame] | 1396 | struct inode *inode = mapping->host; |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1397 | unsigned int loff, nr_pages, req_pages; |
| 1398 | struct page *pages[PIPE_DEF_BUFFERS]; |
| 1399 | struct partial_page partial[PIPE_DEF_BUFFERS]; |
| 1400 | struct page *page; |
| 1401 | pgoff_t index, end_index; |
| 1402 | loff_t isize, left; |
| 1403 | int error, page_nr; |
| 1404 | struct splice_pipe_desc spd = { |
| 1405 | .pages = pages, |
| 1406 | .partial = partial, |
| 1407 | .flags = flags, |
| 1408 | .ops = &page_cache_pipe_buf_ops, |
| 1409 | .spd_release = spd_release_page, |
| 1410 | }; |
| 1411 | |
Hugh Dickins | 71f0e07 | 2011-07-25 17:12:33 -0700 | [diff] [blame] | 1412 | isize = i_size_read(inode); |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1413 | if (unlikely(*ppos >= isize)) |
| 1414 | return 0; |
| 1415 | |
| 1416 | left = isize - *ppos; |
| 1417 | if (unlikely(left < len)) |
| 1418 | len = left; |
| 1419 | |
| 1420 | if (splice_grow_spd(pipe, &spd)) |
| 1421 | return -ENOMEM; |
| 1422 | |
| 1423 | index = *ppos >> PAGE_CACHE_SHIFT; |
| 1424 | loff = *ppos & ~PAGE_CACHE_MASK; |
| 1425 | req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; |
| 1426 | nr_pages = min(req_pages, pipe->buffers); |
| 1427 | |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1428 | spd.nr_pages = find_get_pages_contig(mapping, index, |
| 1429 | nr_pages, spd.pages); |
| 1430 | index += spd.nr_pages; |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1431 | error = 0; |
Hugh Dickins | 71f0e07 | 2011-07-25 17:12:33 -0700 | [diff] [blame] | 1432 | |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1433 | while (spd.nr_pages < nr_pages) { |
Hugh Dickins | 71f0e07 | 2011-07-25 17:12:33 -0700 | [diff] [blame] | 1434 | error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL); |
| 1435 | if (error) |
| 1436 | break; |
| 1437 | unlock_page(page); |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1438 | spd.pages[spd.nr_pages++] = page; |
| 1439 | index++; |
| 1440 | } |
| 1441 | |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1442 | index = *ppos >> PAGE_CACHE_SHIFT; |
| 1443 | nr_pages = spd.nr_pages; |
| 1444 | spd.nr_pages = 0; |
Hugh Dickins | 71f0e07 | 2011-07-25 17:12:33 -0700 | [diff] [blame] | 1445 | |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1446 | for (page_nr = 0; page_nr < nr_pages; page_nr++) { |
| 1447 | unsigned int this_len; |
| 1448 | |
| 1449 | if (!len) |
| 1450 | break; |
| 1451 | |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1452 | this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); |
| 1453 | page = spd.pages[page_nr]; |
| 1454 | |
Hugh Dickins | 71f0e07 | 2011-07-25 17:12:33 -0700 | [diff] [blame] | 1455 | if (!PageUptodate(page) || page->mapping != mapping) { |
Hugh Dickins | 71f0e07 | 2011-07-25 17:12:33 -0700 | [diff] [blame] | 1456 | error = shmem_getpage(inode, index, &page, |
| 1457 | SGP_CACHE, NULL); |
| 1458 | if (error) |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1459 | break; |
Hugh Dickins | 71f0e07 | 2011-07-25 17:12:33 -0700 | [diff] [blame] | 1460 | unlock_page(page); |
| 1461 | page_cache_release(spd.pages[page_nr]); |
| 1462 | spd.pages[page_nr] = page; |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1463 | } |
Hugh Dickins | 71f0e07 | 2011-07-25 17:12:33 -0700 | [diff] [blame] | 1464 | |
| 1465 | isize = i_size_read(inode); |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1466 | end_index = (isize - 1) >> PAGE_CACHE_SHIFT; |
| 1467 | if (unlikely(!isize || index > end_index)) |
| 1468 | break; |
| 1469 | |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1470 | if (end_index == index) { |
| 1471 | unsigned int plen; |
| 1472 | |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1473 | plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; |
| 1474 | if (plen <= loff) |
| 1475 | break; |
| 1476 | |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1477 | this_len = min(this_len, plen - loff); |
| 1478 | len = this_len; |
| 1479 | } |
| 1480 | |
| 1481 | spd.partial[page_nr].offset = loff; |
| 1482 | spd.partial[page_nr].len = this_len; |
| 1483 | len -= this_len; |
| 1484 | loff = 0; |
| 1485 | spd.nr_pages++; |
| 1486 | index++; |
| 1487 | } |
| 1488 | |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1489 | while (page_nr < nr_pages) |
| 1490 | page_cache_release(spd.pages[page_nr++]); |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 1491 | |
| 1492 | if (spd.nr_pages) |
| 1493 | error = splice_to_pipe(pipe, &spd); |
| 1494 | |
| 1495 | splice_shrink_spd(pipe, &spd); |
| 1496 | |
| 1497 | if (error > 0) { |
| 1498 | *ppos += error; |
| 1499 | file_accessed(in); |
| 1500 | } |
| 1501 | return error; |
| 1502 | } |
| 1503 | |
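/*
 * shmem_statfs() only fills in block/inode totals when the mount actually
 * has limits (max_blocks/max_inodes non-zero); an unlimited mount leaves
 * those fields zero, like simple_statfs().  As a worked example (assuming
 * the usual 4 KiB PAGE_CACHE_SIZE): a "size=1g" tmpfs reports
 * f_bsize = 4096 and f_blocks = 262144, with f_bfree/f_bavail reduced by
 * the pages currently counted in sbinfo->used_blocks.
 */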
David Howells | 726c334 | 2006-06-23 02:02:58 -0700 | [diff] [blame] | 1504 | static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1505 | { |
David Howells | 726c334 | 2006-06-23 02:02:58 -0700 | [diff] [blame] | 1506 | struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1507 | |
| 1508 | buf->f_type = TMPFS_MAGIC; |
| 1509 | buf->f_bsize = PAGE_CACHE_SIZE; |
| 1510 | buf->f_namelen = NAME_MAX; |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 1511 | if (sbinfo->max_blocks) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1512 | buf->f_blocks = sbinfo->max_blocks; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1513 | buf->f_bavail = |
| 1514 | buf->f_bfree = sbinfo->max_blocks - |
| 1515 | percpu_counter_sum(&sbinfo->used_blocks); |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 1516 | } |
| 1517 | if (sbinfo->max_inodes) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1518 | buf->f_files = sbinfo->max_inodes; |
| 1519 | buf->f_ffree = sbinfo->free_inodes; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1520 | } |
| 1521 | /* else leave those fields 0 like simple_statfs */ |
| 1522 | return 0; |
| 1523 | } |
| 1524 | |
| 1525 | /* |
| 1526 | * File creation. Allocate an inode, and we're done.. |
| 1527 | */ |
| 1528 | static int |
| 1529 | shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) |
| 1530 | { |
Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 1531 | struct inode *inode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1532 | int error = -ENOSPC; |
| 1533 | |
Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 1534 | inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1535 | if (inode) { |
Eric Paris | 2a7dba3 | 2011-02-01 11:05:39 -0500 | [diff] [blame] | 1536 | error = security_inode_init_security(inode, dir, |
| 1537 | &dentry->d_name, NULL, |
| 1538 | NULL, NULL); |
Stephen Smalley | 570bc1c | 2005-09-09 13:01:43 -0700 | [diff] [blame] | 1539 | if (error) { |
| 1540 | if (error != -EOPNOTSUPP) { |
| 1541 | iput(inode); |
| 1542 | return error; |
| 1543 | } |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 1544 | } |
Christoph Hellwig | 1c7c474 | 2009-11-03 16:44:44 +0100 | [diff] [blame] | 1545 | #ifdef CONFIG_TMPFS_POSIX_ACL |
| 1546 | error = generic_acl_init(inode, dir); |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 1547 | if (error) { |
| 1548 | iput(inode); |
| 1549 | return error; |
Stephen Smalley | 570bc1c | 2005-09-09 13:01:43 -0700 | [diff] [blame] | 1550 | } |
Al Viro | 718deb6 | 2009-12-16 19:35:36 -0500 | [diff] [blame] | 1551 | #else |
| 1552 | error = 0; |
Christoph Hellwig | 1c7c474 | 2009-11-03 16:44:44 +0100 | [diff] [blame] | 1553 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1554 | dir->i_size += BOGO_DIRENT_SIZE; |
| 1555 | dir->i_ctime = dir->i_mtime = CURRENT_TIME; |
| 1556 | d_instantiate(dentry, inode); |
| 1557 | dget(dentry); /* Extra count - pin the dentry in core */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1558 | } |
| 1559 | return error; |
| 1560 | } |
| 1561 | |
| 1562 | static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode) |
| 1563 | { |
| 1564 | int error; |
| 1565 | |
| 1566 | if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) |
| 1567 | return error; |
Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 1568 | inc_nlink(dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1569 | return 0; |
| 1570 | } |
| 1571 | |
| 1572 | static int shmem_create(struct inode *dir, struct dentry *dentry, int mode, |
| 1573 | struct nameidata *nd) |
| 1574 | { |
| 1575 | return shmem_mknod(dir, dentry, mode | S_IFREG, 0); |
| 1576 | } |
| 1577 | |
| 1578 | /* |
| 1579 | * Link a file.. |
| 1580 | */ |
| 1581 | static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) |
| 1582 | { |
| 1583 | struct inode *inode = old_dentry->d_inode; |
Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 1584 | int ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1585 | |
| 1586 | /* |
| 1587 | * No ordinary (disk based) filesystem counts links as inodes; |
| 1588 | * but each new link needs a new dentry, pinning lowmem, and |
| 1589 | * tmpfs dentries cannot be pruned until they are unlinked. |
| 1590 | */ |
Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 1591 | ret = shmem_reserve_inode(inode->i_sb); |
| 1592 | if (ret) |
| 1593 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1594 | |
| 1595 | dir->i_size += BOGO_DIRENT_SIZE; |
| 1596 | inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; |
Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 1597 | inc_nlink(inode); |
Al Viro | 7de9c6ee | 2010-10-23 11:11:40 -0400 | [diff] [blame] | 1598 | ihold(inode); /* New dentry reference */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1599 | dget(dentry); /* Extra pinning count for the created dentry */ |
| 1600 | d_instantiate(dentry, inode); |
Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 1601 | out: |
| 1602 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1603 | } |
| 1604 | |
| 1605 | static int shmem_unlink(struct inode *dir, struct dentry *dentry) |
| 1606 | { |
| 1607 | struct inode *inode = dentry->d_inode; |
| 1608 | |
Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 1609 | if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) |
| 1610 | shmem_free_inode(inode->i_sb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1611 | |
| 1612 | dir->i_size -= BOGO_DIRENT_SIZE; |
| 1613 | inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; |
Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 1614 | drop_nlink(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1615 | dput(dentry); /* Undo the count from "create" - this does all the work */ |
| 1616 | return 0; |
| 1617 | } |
| 1618 | |
| 1619 | static int shmem_rmdir(struct inode *dir, struct dentry *dentry) |
| 1620 | { |
| 1621 | if (!simple_empty(dentry)) |
| 1622 | return -ENOTEMPTY; |
| 1623 | |
Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 1624 | drop_nlink(dentry->d_inode); |
| 1625 | drop_nlink(dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1626 | return shmem_unlink(dir, dentry); |
| 1627 | } |
| 1628 | |
| 1629 | /* |
| 1630 | * The VFS layer already does all the dentry stuff for rename; |
| 1631 | * we just have to decrement the usage count for the target if |
| 1632 | * it exists, so that the VFS layer correctly frees it when it |
| 1633 | * gets overwritten. |
| 1634 | */ |
| 1635 | static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) |
| 1636 | { |
| 1637 | struct inode *inode = old_dentry->d_inode; |
| 1638 | int they_are_dirs = S_ISDIR(inode->i_mode); |
| 1639 | |
| 1640 | if (!simple_empty(new_dentry)) |
| 1641 | return -ENOTEMPTY; |
| 1642 | |
| 1643 | if (new_dentry->d_inode) { |
| 1644 | (void) shmem_unlink(new_dir, new_dentry); |
| 1645 | if (they_are_dirs) |
Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 1646 | drop_nlink(old_dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1647 | } else if (they_are_dirs) { |
Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 1648 | drop_nlink(old_dir); |
Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 1649 | inc_nlink(new_dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1650 | } |
| 1651 | |
| 1652 | old_dir->i_size -= BOGO_DIRENT_SIZE; |
| 1653 | new_dir->i_size += BOGO_DIRENT_SIZE; |
| 1654 | old_dir->i_ctime = old_dir->i_mtime = |
| 1655 | new_dir->i_ctime = new_dir->i_mtime = |
| 1656 | inode->i_ctime = CURRENT_TIME; |
| 1657 | return 0; |
| 1658 | } |
| 1659 | |
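/*
 * Symlinks are stored in one of two ways: targets short enough to fit in
 * SHMEM_SYMLINK_INLINE_LEN are copied straight into the inode's
 * shmem_inode_info (shmem_symlink_inline_operations), while longer targets
 * live in page 0 of the inode's page cache, allocated here with SGP_WRITE
 * and read back by shmem_follow_link()/shmem_put_link() below.
 */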
| 1660 | static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) |
| 1661 | { |
| 1662 | int error; |
| 1663 | int len; |
| 1664 | struct inode *inode; |
Hugh Dickins | 9276aad | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 1665 | struct page *page; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1666 | char *kaddr; |
| 1667 | struct shmem_inode_info *info; |
| 1668 | |
| 1669 | len = strlen(symname) + 1; |
| 1670 | if (len > PAGE_CACHE_SIZE) |
| 1671 | return -ENAMETOOLONG; |
| 1672 | |
Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 1673 | inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1674 | if (!inode) |
| 1675 | return -ENOSPC; |
| 1676 | |
Eric Paris | 2a7dba3 | 2011-02-01 11:05:39 -0500 | [diff] [blame] | 1677 | error = security_inode_init_security(inode, dir, &dentry->d_name, NULL, |
| 1678 | NULL, NULL); |
Stephen Smalley | 570bc1c | 2005-09-09 13:01:43 -0700 | [diff] [blame] | 1679 | if (error) { |
| 1680 | if (error != -EOPNOTSUPP) { |
| 1681 | iput(inode); |
| 1682 | return error; |
| 1683 | } |
| 1684 | error = 0; |
| 1685 | } |
| 1686 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1687 | info = SHMEM_I(inode); |
| 1688 | inode->i_size = len-1; |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 1689 | if (len <= SHMEM_SYMLINK_INLINE_LEN) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1690 | /* do it inline */ |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 1691 | memcpy(info->inline_symlink, symname, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1692 | inode->i_op = &shmem_symlink_inline_operations; |
| 1693 | } else { |
| 1694 | error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); |
| 1695 | if (error) { |
| 1696 | iput(inode); |
| 1697 | return error; |
| 1698 | } |
Hugh Dickins | 14fcc23 | 2008-07-28 15:46:19 -0700 | [diff] [blame] | 1699 | inode->i_mapping->a_ops = &shmem_aops; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1700 | inode->i_op = &shmem_symlink_inode_operations; |
| 1701 | kaddr = kmap_atomic(page, KM_USER0); |
| 1702 | memcpy(kaddr, symname, len); |
| 1703 | kunmap_atomic(kaddr, KM_USER0); |
| 1704 | set_page_dirty(page); |
Wu Fengguang | 6746aff | 2009-09-16 11:50:14 +0200 | [diff] [blame] | 1705 | unlock_page(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1706 | page_cache_release(page); |
| 1707 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1708 | dir->i_size += BOGO_DIRENT_SIZE; |
| 1709 | dir->i_ctime = dir->i_mtime = CURRENT_TIME; |
| 1710 | d_instantiate(dentry, inode); |
| 1711 | dget(dentry); |
| 1712 | return 0; |
| 1713 | } |
| 1714 | |
Linus Torvalds | cc314ee | 2005-08-19 18:02:56 -0700 | [diff] [blame] | 1715 | static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1716 | { |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 1717 | nd_set_link(nd, SHMEM_I(dentry->d_inode)->inline_symlink); |
Linus Torvalds | cc314ee | 2005-08-19 18:02:56 -0700 | [diff] [blame] | 1718 | return NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1719 | } |
| 1720 | |
Linus Torvalds | cc314ee | 2005-08-19 18:02:56 -0700 | [diff] [blame] | 1721 | static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1722 | { |
| 1723 | struct page *page = NULL; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1724 | int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); |
| 1725 | nd_set_link(nd, error ? ERR_PTR(error) : kmap(page)); |
Hugh Dickins | d360244 | 2008-02-04 22:28:44 -0800 | [diff] [blame] | 1726 | if (page) |
| 1727 | unlock_page(page); |
Linus Torvalds | cc314ee | 2005-08-19 18:02:56 -0700 | [diff] [blame] | 1728 | return page; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1729 | } |
| 1730 | |
Linus Torvalds | cc314ee | 2005-08-19 18:02:56 -0700 | [diff] [blame] | 1731 | static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1732 | { |
| 1733 | if (!IS_ERR(nd_get_link(nd))) { |
Linus Torvalds | cc314ee | 2005-08-19 18:02:56 -0700 | [diff] [blame] | 1734 | struct page *page = cookie; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1735 | kunmap(page); |
| 1736 | mark_page_accessed(page); |
| 1737 | page_cache_release(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1738 | } |
| 1739 | } |
| 1740 | |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 1741 | #ifdef CONFIG_TMPFS_XATTR |
| 1742 | /* |
| 1743 | * Superblocks without xattr inode operations may get some security.* xattr |
| 1744 | * support from the LSM "for free". As soon as we have any other xattrs |
| 1745 | * like ACLs, we also need to implement the security.* handlers at |
| 1746 | * filesystem level, though. |
| 1747 | */ |
| 1748 | |
| 1749 | static int shmem_xattr_get(struct dentry *dentry, const char *name, |
| 1750 | void *buffer, size_t size) |
| 1751 | { |
| 1752 | struct shmem_inode_info *info; |
| 1753 | struct shmem_xattr *xattr; |
| 1754 | int ret = -ENODATA; |
| 1755 | |
| 1756 | info = SHMEM_I(dentry->d_inode); |
| 1757 | |
| 1758 | spin_lock(&info->lock); |
| 1759 | list_for_each_entry(xattr, &info->xattr_list, list) { |
| 1760 | if (strcmp(name, xattr->name)) |
| 1761 | continue; |
| 1762 | |
| 1763 | ret = xattr->size; |
| 1764 | if (buffer) { |
| 1765 | if (size < xattr->size) |
| 1766 | ret = -ERANGE; |
| 1767 | else |
| 1768 | memcpy(buffer, xattr->value, xattr->size); |
| 1769 | } |
| 1770 | break; |
| 1771 | } |
| 1772 | spin_unlock(&info->lock); |
| 1773 | return ret; |
| 1774 | } |
| 1775 | |
| 1776 | static int shmem_xattr_set(struct dentry *dentry, const char *name, |
| 1777 | const void *value, size_t size, int flags) |
| 1778 | { |
| 1779 | struct inode *inode = dentry->d_inode; |
| 1780 | struct shmem_inode_info *info = SHMEM_I(inode); |
| 1781 | struct shmem_xattr *xattr; |
| 1782 | struct shmem_xattr *new_xattr = NULL; |
| 1783 | size_t len; |
| 1784 | int err = 0; |
| 1785 | |
| 1786 | /* value == NULL means remove */ |
| 1787 | if (value) { |
| 1788 | /* wrap around? */ |
| 1789 | len = sizeof(*new_xattr) + size; |
| 1790 | if (len <= sizeof(*new_xattr)) |
| 1791 | return -ENOMEM; |
| 1792 | |
| 1793 | new_xattr = kmalloc(len, GFP_KERNEL); |
| 1794 | if (!new_xattr) |
| 1795 | return -ENOMEM; |
| 1796 | |
| 1797 | new_xattr->name = kstrdup(name, GFP_KERNEL); |
| 1798 | if (!new_xattr->name) { |
| 1799 | kfree(new_xattr); |
| 1800 | return -ENOMEM; |
| 1801 | } |
| 1802 | |
| 1803 | new_xattr->size = size; |
| 1804 | memcpy(new_xattr->value, value, size); |
| 1805 | } |
| 1806 | |
| 1807 | spin_lock(&info->lock); |
| 1808 | list_for_each_entry(xattr, &info->xattr_list, list) { |
| 1809 | if (!strcmp(name, xattr->name)) { |
| 1810 | if (flags & XATTR_CREATE) { |
| 1811 | xattr = new_xattr; |
| 1812 | err = -EEXIST; |
| 1813 | } else if (new_xattr) { |
| 1814 | list_replace(&xattr->list, &new_xattr->list); |
| 1815 | } else { |
| 1816 | list_del(&xattr->list); |
| 1817 | } |
| 1818 | goto out; |
| 1819 | } |
| 1820 | } |
| 1821 | if (flags & XATTR_REPLACE) { |
| 1822 | xattr = new_xattr; |
| 1823 | err = -ENODATA; |
| 1824 | } else { |
| 1825 | list_add(&new_xattr->list, &info->xattr_list); |
| 1826 | xattr = NULL; |
| 1827 | } |
| 1828 | out: |
| 1829 | spin_unlock(&info->lock); |
| 1830 | if (xattr) |
| 1831 | kfree(xattr->name); |
| 1832 | kfree(xattr); |
| 1833 | return err; |
| 1834 | } |
| 1835 | |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 1836 | static const struct xattr_handler *shmem_xattr_handlers[] = { |
| 1837 | #ifdef CONFIG_TMPFS_POSIX_ACL |
| 1838 | &generic_acl_access_handler, |
| 1839 | &generic_acl_default_handler, |
| 1840 | #endif |
| 1841 | NULL |
| 1842 | }; |
| 1843 | |
| 1844 | static int shmem_xattr_validate(const char *name) |
| 1845 | { |
| 1846 | struct { const char *prefix; size_t len; } arr[] = { |
| 1847 | { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN }, |
| 1848 | { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN } |
| 1849 | }; |
| 1850 | int i; |
| 1851 | |
| 1852 | for (i = 0; i < ARRAY_SIZE(arr); i++) { |
| 1853 | size_t preflen = arr[i].len; |
| 1854 | if (strncmp(name, arr[i].prefix, preflen) == 0) { |
| 1855 | if (!name[preflen]) |
| 1856 | return -EINVAL; |
| 1857 | return 0; |
| 1858 | } |
| 1859 | } |
| 1860 | return -EOPNOTSUPP; |
| 1861 | } |
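/*
 * shmem_xattr_validate() above accepts only the "security." and "trusted."
 * prefixes, which tmpfs stores itself; "system.*" names are routed to the
 * generic sb->s_xattr handlers by the wrappers below, and everything else
 * is rejected.  For example:
 *
 *	shmem_xattr_validate("security.selinux")  returns 0
 *	shmem_xattr_validate("trusted.")          returns -EINVAL
 *	shmem_xattr_validate("user.foo")          returns -EOPNOTSUPP
 */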
| 1862 | |
| 1863 | static ssize_t shmem_getxattr(struct dentry *dentry, const char *name, |
| 1864 | void *buffer, size_t size) |
| 1865 | { |
| 1866 | int err; |
| 1867 | |
| 1868 | /* |
| 1869 | * If this is a request for a synthetic attribute in the system.* |
| 1870 | * namespace, use the generic infrastructure to resolve a handler |
| 1871 | * for it via sb->s_xattr. |
| 1872 | */ |
| 1873 | if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) |
| 1874 | return generic_getxattr(dentry, name, buffer, size); |
| 1875 | |
| 1876 | err = shmem_xattr_validate(name); |
| 1877 | if (err) |
| 1878 | return err; |
| 1879 | |
| 1880 | return shmem_xattr_get(dentry, name, buffer, size); |
| 1881 | } |
| 1882 | |
| 1883 | static int shmem_setxattr(struct dentry *dentry, const char *name, |
| 1884 | const void *value, size_t size, int flags) |
| 1885 | { |
| 1886 | int err; |
| 1887 | |
| 1888 | /* |
| 1889 | * If this is a request for a synthetic attribute in the system.* |
| 1890 | * namespace, use the generic infrastructure to resolve a handler |
| 1891 | * for it via sb->s_xattr. |
| 1892 | */ |
| 1893 | if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) |
| 1894 | return generic_setxattr(dentry, name, value, size, flags); |
| 1895 | |
| 1896 | err = shmem_xattr_validate(name); |
| 1897 | if (err) |
| 1898 | return err; |
| 1899 | |
| 1900 | if (size == 0) |
| 1901 | value = ""; /* empty EA, do not remove */ |
| 1902 | |
| 1903 | return shmem_xattr_set(dentry, name, value, size, flags); |
| 1904 | |
| 1905 | } |
| 1906 | |
| 1907 | static int shmem_removexattr(struct dentry *dentry, const char *name) |
| 1908 | { |
| 1909 | int err; |
| 1910 | |
| 1911 | /* |
| 1912 | * If this is a request for a synthetic attribute in the system.* |
| 1913 | * namespace, use the generic infrastructure to resolve a handler |
| 1914 | * for it via sb->s_xattr. |
| 1915 | */ |
| 1916 | if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) |
| 1917 | return generic_removexattr(dentry, name); |
| 1918 | |
| 1919 | err = shmem_xattr_validate(name); |
| 1920 | if (err) |
| 1921 | return err; |
| 1922 | |
| 1923 | return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE); |
| 1924 | } |
| 1925 | |
| 1926 | static bool xattr_is_trusted(const char *name) |
| 1927 | { |
| 1928 | return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN); |
| 1929 | } |
| 1930 | |
| 1931 | static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) |
| 1932 | { |
| 1933 | bool trusted = capable(CAP_SYS_ADMIN); |
| 1934 | struct shmem_xattr *xattr; |
| 1935 | struct shmem_inode_info *info; |
| 1936 | size_t used = 0; |
| 1937 | |
| 1938 | info = SHMEM_I(dentry->d_inode); |
| 1939 | |
| 1940 | spin_lock(&info->lock); |
| 1941 | list_for_each_entry(xattr, &info->xattr_list, list) { |
| 1942 | size_t len; |
| 1943 | |
| 1944 | /* skip "trusted." attributes for unprivileged callers */ |
| 1945 | if (!trusted && xattr_is_trusted(xattr->name)) |
| 1946 | continue; |
| 1947 | |
| 1948 | len = strlen(xattr->name) + 1; |
| 1949 | used += len; |
| 1950 | if (buffer) { |
| 1951 | if (size < used) { |
| 1952 | used = -ERANGE; |
| 1953 | break; |
| 1954 | } |
| 1955 | memcpy(buffer, xattr->name, len); |
| 1956 | buffer += len; |
| 1957 | } |
| 1958 | } |
| 1959 | spin_unlock(&info->lock); |
| 1960 | |
| 1961 | return used; |
| 1962 | } |
| 1963 | #endif /* CONFIG_TMPFS_XATTR */ |
| 1964 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 1965 | static const struct inode_operations shmem_symlink_inline_operations = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1966 | .readlink = generic_readlink, |
| 1967 | .follow_link = shmem_follow_link_inline, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 1968 | #ifdef CONFIG_TMPFS_XATTR |
| 1969 | .setxattr = shmem_setxattr, |
| 1970 | .getxattr = shmem_getxattr, |
| 1971 | .listxattr = shmem_listxattr, |
| 1972 | .removexattr = shmem_removexattr, |
| 1973 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1974 | }; |
| 1975 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 1976 | static const struct inode_operations shmem_symlink_inode_operations = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1977 | .readlink = generic_readlink, |
| 1978 | .follow_link = shmem_follow_link, |
| 1979 | .put_link = shmem_put_link, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 1980 | #ifdef CONFIG_TMPFS_XATTR |
| 1981 | .setxattr = shmem_setxattr, |
| 1982 | .getxattr = shmem_getxattr, |
| 1983 | .listxattr = shmem_listxattr, |
| 1984 | .removexattr = shmem_removexattr, |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 1985 | #endif |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 1986 | }; |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 1987 | |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 1988 | static struct dentry *shmem_get_parent(struct dentry *child) |
| 1989 | { |
| 1990 | return ERR_PTR(-ESTALE); |
| 1991 | } |
| 1992 | |
| 1993 | static int shmem_match(struct inode *ino, void *vfh) |
| 1994 | { |
| 1995 | __u32 *fh = vfh; |
| 1996 | __u64 inum = fh[2]; |
| 1997 | inum = (inum << 32) | fh[1]; |
| 1998 | return ino->i_ino == inum && fh[0] == ino->i_generation; |
| 1999 | } |
| 2000 | |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 2001 | static struct dentry *shmem_fh_to_dentry(struct super_block *sb, |
| 2002 | struct fid *fid, int fh_len, int fh_type) |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2003 | { |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2004 | struct inode *inode; |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 2005 | struct dentry *dentry = NULL; |
| 2006 | u64 inum = fid->raw[2]; |
| 2007 | inum = (inum << 32) | fid->raw[1]; |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2008 | |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 2009 | if (fh_len < 3) |
| 2010 | return NULL; |
| 2011 | |
| 2012 | inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), |
| 2013 | shmem_match, fid->raw); |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2014 | if (inode) { |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 2015 | dentry = d_find_alias(inode); |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2016 | iput(inode); |
| 2017 | } |
| 2018 | |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 2019 | return dentry; |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2020 | } |
| 2021 | |
| 2022 | static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len, |
| 2023 | int connectable) |
| 2024 | { |
| 2025 | struct inode *inode = dentry->d_inode; |
| 2026 | |
Aneesh Kumar K.V | 5fe0c23 | 2011-01-29 18:43:25 +0530 | [diff] [blame] | 2027 | if (*len < 3) { |
| 2028 | *len = 3; |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2029 | return 255; |
Aneesh Kumar K.V | 5fe0c23 | 2011-01-29 18:43:25 +0530 | [diff] [blame] | 2030 | } |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2031 | |
Al Viro | 1d3382cb | 2010-10-23 15:19:20 -0400 | [diff] [blame] | 2032 | if (inode_unhashed(inode)) { |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2033 | /* Unfortunately insert_inode_hash is not idempotent, |
| 2034 | * so as we hash inodes here rather than at creation |
| 2035 | * time, we need a lock to ensure we only try |
| 2036 | * to do it once |
| 2037 | */ |
| 2038 | static DEFINE_SPINLOCK(lock); |
| 2039 | spin_lock(&lock); |
Al Viro | 1d3382cb | 2010-10-23 15:19:20 -0400 | [diff] [blame] | 2040 | if (inode_unhashed(inode)) |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2041 | __insert_inode_hash(inode, |
| 2042 | inode->i_ino + inode->i_generation); |
| 2043 | spin_unlock(&lock); |
| 2044 | } |
| 2045 | |
| 2046 | fh[0] = inode->i_generation; |
| 2047 | fh[1] = inode->i_ino; |
| 2048 | fh[2] = ((__u64)inode->i_ino) >> 32; |
| 2049 | |
| 2050 | *len = 3; |
| 2051 | return 1; |
| 2052 | } |
| 2053 | |
Christoph Hellwig | 3965516 | 2007-10-21 16:42:17 -0700 | [diff] [blame] | 2054 | static const struct export_operations shmem_export_ops = { |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2055 | .get_parent = shmem_get_parent, |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2056 | .encode_fh = shmem_encode_fh, |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 2057 | .fh_to_dentry = shmem_fh_to_dentry, |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2058 | }; |
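/*
 * Illustrative helper, not part of the original file: the three-word
 * handle produced by shmem_encode_fh() above carries i_generation in
 * fh[0] and the inode number split across fh[1] (low 32 bits) and
 * fh[2] (high 32 bits); shmem_match() and shmem_fh_to_dentry()
 * reassemble it as sketched here.  The function name is hypothetical.
 */
static inline u64 shmem_fh_to_inum(const __u32 *fh)
{
	return ((u64)fh[2] << 32) | fh[1];
}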
| 2059 | |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2060 | static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, |
| 2061 | bool remount) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2062 | { |
| 2063 | char *this_char, *value, *rest; |
| 2064 | |
Hugh Dickins | b00dc3a | 2006-02-21 23:49:47 +0000 | [diff] [blame] | 2065 | while (options != NULL) { |
| 2066 | this_char = options; |
| 2067 | for (;;) { |
| 2068 | /* |
| 2069 | * NUL-terminate this option: unfortunately, |
| 2070 | * mount options form a comma-separated list, |
| 2071 | * but mpol's nodelist may also contain commas. |
| 2072 | */ |
| 2073 | options = strchr(options, ','); |
| 2074 | if (options == NULL) |
| 2075 | break; |
| 2076 | options++; |
| 2077 | if (!isdigit(*options)) { |
| 2078 | options[-1] = '\0'; |
| 2079 | break; |
| 2080 | } |
| 2081 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2082 | if (!*this_char) |
| 2083 | continue; |
| 2084 | if ((value = strchr(this_char,'=')) != NULL) { |
| 2085 | *value++ = 0; |
| 2086 | } else { |
| 2087 | printk(KERN_ERR |
| 2088 | "tmpfs: No value for mount option '%s'\n", |
| 2089 | this_char); |
| 2090 | return 1; |
| 2091 | } |
| 2092 | |
| 2093 | if (!strcmp(this_char,"size")) { |
| 2094 | unsigned long long size; |
| 2095 | size = memparse(value,&rest); |
| 2096 | if (*rest == '%') { |
| 2097 | size <<= PAGE_SHIFT; |
| 2098 | size *= totalram_pages; |
| 2099 | do_div(size, 100); |
| 2100 | rest++; |
| 2101 | } |
| 2102 | if (*rest) |
| 2103 | goto bad_val; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2104 | sbinfo->max_blocks = |
| 2105 | DIV_ROUND_UP(size, PAGE_CACHE_SIZE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2106 | } else if (!strcmp(this_char,"nr_blocks")) { |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2107 | sbinfo->max_blocks = memparse(value, &rest); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2108 | if (*rest) |
| 2109 | goto bad_val; |
| 2110 | } else if (!strcmp(this_char,"nr_inodes")) { |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2111 | sbinfo->max_inodes = memparse(value, &rest); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2112 | if (*rest) |
| 2113 | goto bad_val; |
| 2114 | } else if (!strcmp(this_char,"mode")) { |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2115 | if (remount) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2116 | continue; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2117 | sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2118 | if (*rest) |
| 2119 | goto bad_val; |
| 2120 | } else if (!strcmp(this_char,"uid")) { |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2121 | if (remount) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2122 | continue; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2123 | sbinfo->uid = simple_strtoul(value, &rest, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2124 | if (*rest) |
| 2125 | goto bad_val; |
| 2126 | } else if (!strcmp(this_char,"gid")) { |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2127 | if (remount) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2128 | continue; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2129 | sbinfo->gid = simple_strtoul(value, &rest, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2130 | if (*rest) |
| 2131 | goto bad_val; |
Robin Holt | 7339ff8 | 2006-01-14 13:20:48 -0800 | [diff] [blame] | 2132 | } else if (!strcmp(this_char,"mpol")) { |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 2133 | if (mpol_parse_str(value, &sbinfo->mpol, 1)) |
Robin Holt | 7339ff8 | 2006-01-14 13:20:48 -0800 | [diff] [blame] | 2134 | goto bad_val; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2135 | } else { |
| 2136 | printk(KERN_ERR "tmpfs: Bad mount option %s\n", |
| 2137 | this_char); |
| 2138 | return 1; |
| 2139 | } |
| 2140 | } |
| 2141 | return 0; |
| 2142 | |
| 2143 | bad_val: |
| 2144 | printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", |
| 2145 | value, this_char); |
| 2146 | return 1; |
| 2147 | |
| 2148 | } |
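/*
 * Illustrative worked example, not part of the original file: with 4K
 * pages, "size=50%" on a machine with 1 GiB of RAM flows through the
 * parser above roughly as
 *
 *	size = memparse("50%", &rest);           size == 50, *rest == '%'
 *	size <<= PAGE_SHIFT;                     size == 204800
 *	size *= totalram_pages;                  size == 50 * 262144 * 4096
 *	do_div(size, 100);                       size == 536870912 (512 MiB)
 *	max_blocks = DIV_ROUND_UP(size, PAGE_CACHE_SIZE);   == 131072 blocks
 */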
| 2149 | |
| 2150 | static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) |
| 2151 | { |
| 2152 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2153 | struct shmem_sb_info config = *sbinfo; |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2154 | unsigned long inodes; |
| 2155 | int error = -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2156 | |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2157 | if (shmem_parse_options(data, &config, true)) |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2158 | return error; |
| 2159 | |
| 2160 | spin_lock(&sbinfo->stat_lock); |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2161 | inodes = sbinfo->max_inodes - sbinfo->free_inodes; |
Tim Chen | 7e49629 | 2010-08-09 17:19:05 -0700 | [diff] [blame] | 2162 | if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0) |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2163 | goto out; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2164 | if (config.max_inodes < inodes) |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2165 | goto out; |
| 2166 | /* |
| 2167 | * Those tests also disallow limited->unlimited while any are in |
| 2168 | * use, so i_blocks will always be zero when max_blocks is zero; |
| 2169 | * but we must separately disallow unlimited->limited, because |
| 2170 | * in that case we have no record of how much is already in use. |
| 2171 | */ |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2172 | if (config.max_blocks && !sbinfo->max_blocks) |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2173 | goto out; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2174 | if (config.max_inodes && !sbinfo->max_inodes) |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2175 | goto out; |
| 2176 | |
| 2177 | error = 0; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2178 | sbinfo->max_blocks = config.max_blocks; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2179 | sbinfo->max_inodes = config.max_inodes; |
| 2180 | sbinfo->free_inodes = config.max_inodes - inodes; |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 2181 | |
| 2182 | mpol_put(sbinfo->mpol); |
| 2183 | sbinfo->mpol = config.mpol; /* transfers initial ref */ |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2184 | out: |
| 2185 | spin_unlock(&sbinfo->stat_lock); |
| 2186 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2187 | } |
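/*
 * Illustrative examples, not part of the original file, of the remount
 * policy enforced above for an instance currently mounted with size=1G:
 *
 *	mount -o remount,size=2G   ...	ok: the limit grows
 *	mount -o remount,size=512M ...	ok only while usage fits in 512M
 *	mount -o remount,size=0    ...	rejected while any blocks are in
 *					use (limited -> unlimited)
 *
 * An instance mounted unlimited (size=0 / nr_blocks=0) can never be
 * remounted with a finite limit, since its usage was never accounted.
 */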
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2188 | |
| 2189 | static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs) |
| 2190 | { |
| 2191 | struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb); |
| 2192 | |
| 2193 | if (sbinfo->max_blocks != shmem_default_max_blocks()) |
| 2194 | seq_printf(seq, ",size=%luk", |
| 2195 | sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); |
| 2196 | if (sbinfo->max_inodes != shmem_default_max_inodes()) |
| 2197 | seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); |
| 2198 | if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) |
| 2199 | seq_printf(seq, ",mode=%03o", sbinfo->mode); |
| 2200 | if (sbinfo->uid != 0) |
| 2201 | seq_printf(seq, ",uid=%u", sbinfo->uid); |
| 2202 | if (sbinfo->gid != 0) |
| 2203 | seq_printf(seq, ",gid=%u", sbinfo->gid); |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 2204 | shmem_show_mpol(seq, sbinfo->mpol); |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2205 | return 0; |
| 2206 | } |
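/*
 * Illustrative example, not part of the original file: with the code
 * above, "mount -t tmpfs -o size=512m,uid=1000 tmpfs /mnt" shows up in
 * /proc/mounts roughly as
 *
 *	tmpfs /mnt tmpfs rw,relatime,size=524288k,uid=1000 0 0
 *
 * Options left at their defaults (size of half of RAM, nr_inodes,
 * mode 1777, uid/gid 0) are omitted from the output.
 */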
| 2207 | #endif /* CONFIG_TMPFS */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2208 | |
| 2209 | static void shmem_put_super(struct super_block *sb) |
| 2210 | { |
Hugh Dickins | 602586a | 2010-08-17 15:23:56 -0700 | [diff] [blame] | 2211 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
| 2212 | |
| 2213 | percpu_counter_destroy(&sbinfo->used_blocks); |
| 2214 | kfree(sbinfo); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2215 | sb->s_fs_info = NULL; |
| 2216 | } |
| 2217 | |
Kay Sievers | 2b2af54 | 2009-04-30 15:23:42 +0200 | [diff] [blame] | 2218 | int shmem_fill_super(struct super_block *sb, void *data, int silent) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2219 | { |
| 2220 | struct inode *inode; |
| 2221 | struct dentry *root; |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2222 | struct shmem_sb_info *sbinfo; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2223 | int err = -ENOMEM; |
| 2224 | |
| 2225 | /* Round up to L1_CACHE_BYTES to resist false sharing */ |
Pekka Enberg | 425fbf0 | 2009-09-21 17:03:50 -0700 | [diff] [blame] | 2226 | sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2227 | L1_CACHE_BYTES), GFP_KERNEL); |
| 2228 | if (!sbinfo) |
| 2229 | return -ENOMEM; |
| 2230 | |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2231 | sbinfo->mode = S_IRWXUGO | S_ISVTX; |
David Howells | 76aac0e | 2008-11-14 10:39:12 +1100 | [diff] [blame] | 2232 | sbinfo->uid = current_fsuid(); |
| 2233 | sbinfo->gid = current_fsgid(); |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2234 | sb->s_fs_info = sbinfo; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2235 | |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2236 | #ifdef CONFIG_TMPFS |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2237 | /* |
| 2238 | * By default we only allow half of the physical RAM per |
| 2239 | * tmpfs instance, limiting inodes to one per page of lowmem; |
| 2240 | * but the internal instance is left unlimited. |
| 2241 | */ |
| 2242 | if (!(sb->s_flags & MS_NOUSER)) { |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2243 | sbinfo->max_blocks = shmem_default_max_blocks(); |
| 2244 | sbinfo->max_inodes = shmem_default_max_inodes(); |
| 2245 | if (shmem_parse_options(data, sbinfo, false)) { |
| 2246 | err = -EINVAL; |
| 2247 | goto failed; |
| 2248 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2249 | } |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2250 | sb->s_export_op = &shmem_export_ops; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2251 | #else |
| 2252 | sb->s_flags |= MS_NOUSER; |
| 2253 | #endif |
| 2254 | |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2255 | spin_lock_init(&sbinfo->stat_lock); |
Hugh Dickins | 602586a | 2010-08-17 15:23:56 -0700 | [diff] [blame] | 2256 | if (percpu_counter_init(&sbinfo->used_blocks, 0)) |
| 2257 | goto failed; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2258 | sbinfo->free_inodes = sbinfo->max_inodes; |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2259 | |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 2260 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2261 | sb->s_blocksize = PAGE_CACHE_SIZE; |
| 2262 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; |
| 2263 | sb->s_magic = TMPFS_MAGIC; |
| 2264 | sb->s_op = &shmem_ops; |
Robin H. Johnson | cfd95a9 | 2006-06-12 21:50:25 +0100 | [diff] [blame] | 2265 | sb->s_time_gran = 1; |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 2266 | #ifdef CONFIG_TMPFS_XATTR |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2267 | sb->s_xattr = shmem_xattr_handlers; |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 2268 | #endif |
| 2269 | #ifdef CONFIG_TMPFS_POSIX_ACL |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2270 | sb->s_flags |= MS_POSIXACL; |
| 2271 | #endif |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2272 | |
Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 2273 | inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2274 | if (!inode) |
| 2275 | goto failed; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2276 | inode->i_uid = sbinfo->uid; |
| 2277 | inode->i_gid = sbinfo->gid; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2278 | root = d_alloc_root(inode); |
| 2279 | if (!root) |
| 2280 | goto failed_iput; |
| 2281 | sb->s_root = root; |
| 2282 | return 0; |
| 2283 | |
| 2284 | failed_iput: |
| 2285 | iput(inode); |
| 2286 | failed: |
| 2287 | shmem_put_super(sb); |
| 2288 | return err; |
| 2289 | } |
| 2290 | |
Pekka Enberg | fcc234f | 2006-03-22 00:08:13 -0800 | [diff] [blame] | 2291 | static struct kmem_cache *shmem_inode_cachep; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2292 | |
| 2293 | static struct inode *shmem_alloc_inode(struct super_block *sb) |
| 2294 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2295 | struct shmem_inode_info *info; |
| 2296 | info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); |
| 2297 | if (!info) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2298 | return NULL; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2299 | return &info->vfs_inode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2300 | } |
| 2301 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2302 | static void shmem_destroy_callback(struct rcu_head *head) |
Nick Piggin | fa0d7e3d | 2011-01-07 17:49:49 +1100 | [diff] [blame] | 2303 | { |
| 2304 | struct inode *inode = container_of(head, struct inode, i_rcu); |
| 2305 | INIT_LIST_HEAD(&inode->i_dentry); |
| 2306 | kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); |
| 2307 | } |
| 2308 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2309 | static void shmem_destroy_inode(struct inode *inode) |
| 2310 | { |
| 2311 | if ((inode->i_mode & S_IFMT) == S_IFREG) { |
| 2312 | /* only struct inode is valid if it's an inline symlink */ |
| 2313 | mpol_free_shared_policy(&SHMEM_I(inode)->policy); |
| 2314 | } |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2315 | call_rcu(&inode->i_rcu, shmem_destroy_callback); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2316 | } |
| 2317 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2318 | static void shmem_init_inode(void *foo) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2319 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2320 | struct shmem_inode_info *info = foo; |
| 2321 | inode_init_once(&info->vfs_inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2322 | } |
| 2323 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2324 | static int shmem_init_inodecache(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2325 | { |
| 2326 | shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", |
| 2327 | sizeof(struct shmem_inode_info), |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2328 | 0, SLAB_PANIC, shmem_init_inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2329 | return 0; |
| 2330 | } |
| 2331 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2332 | static void shmem_destroy_inodecache(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2333 | { |
Alexey Dobriyan | 1a1d92c | 2006-09-27 01:49:40 -0700 | [diff] [blame] | 2334 | kmem_cache_destroy(shmem_inode_cachep); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2335 | } |
| 2336 | |
Christoph Hellwig | f5e54d6 | 2006-06-28 04:26:44 -0700 | [diff] [blame] | 2337 | static const struct address_space_operations shmem_aops = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2338 | .writepage = shmem_writepage, |
Ken Chen | 7671932 | 2007-02-10 01:43:15 -0800 | [diff] [blame] | 2339 | .set_page_dirty = __set_page_dirty_no_writeback, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2340 | #ifdef CONFIG_TMPFS |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 2341 | .write_begin = shmem_write_begin, |
| 2342 | .write_end = shmem_write_end, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2343 | #endif |
Lee Schermerhorn | 304dbdb | 2006-04-22 02:35:48 -0700 | [diff] [blame] | 2344 | .migratepage = migrate_page, |
Andi Kleen | aa261f5 | 2009-09-16 11:50:16 +0200 | [diff] [blame] | 2345 | .error_remove_page = generic_error_remove_page, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2346 | }; |
| 2347 | |
Helge Deller | 15ad7cd | 2006-12-06 20:40:36 -0800 | [diff] [blame] | 2348 | static const struct file_operations shmem_file_operations = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2349 | .mmap = shmem_mmap, |
| 2350 | #ifdef CONFIG_TMPFS |
| 2351 | .llseek = generic_file_llseek, |
Hugh Dickins | bcd78e4 | 2008-07-23 21:27:35 -0700 | [diff] [blame] | 2352 | .read = do_sync_read, |
Hugh Dickins | 5402b97 | 2008-02-04 22:28:44 -0800 | [diff] [blame] | 2353 | .write = do_sync_write, |
Hugh Dickins | bcd78e4 | 2008-07-23 21:27:35 -0700 | [diff] [blame] | 2354 | .aio_read = shmem_file_aio_read, |
Hugh Dickins | 5402b97 | 2008-02-04 22:28:44 -0800 | [diff] [blame] | 2355 | .aio_write = generic_file_aio_write, |
Christoph Hellwig | 1b061d9 | 2010-05-26 17:53:41 +0200 | [diff] [blame] | 2356 | .fsync = noop_fsync, |
Hugh Dickins | 708e350 | 2011-07-25 17:12:32 -0700 | [diff] [blame] | 2357 | .splice_read = shmem_file_splice_read, |
Hugh Dickins | ae976416 | 2007-06-04 10:00:39 +0200 | [diff] [blame] | 2358 | .splice_write = generic_file_splice_write, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2359 | #endif |
| 2360 | }; |
| 2361 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 2362 | static const struct inode_operations shmem_inode_operations = { |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 2363 | .setattr = shmem_setattr, |
Badari Pulavarty | f6b3ec2 | 2006-01-06 00:10:38 -0800 | [diff] [blame] | 2364 | .truncate_range = shmem_truncate_range, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 2365 | #ifdef CONFIG_TMPFS_XATTR |
| 2366 | .setxattr = shmem_setxattr, |
| 2367 | .getxattr = shmem_getxattr, |
| 2368 | .listxattr = shmem_listxattr, |
| 2369 | .removexattr = shmem_removexattr, |
| 2370 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2371 | }; |
| 2372 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 2373 | static const struct inode_operations shmem_dir_inode_operations = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2374 | #ifdef CONFIG_TMPFS |
| 2375 | .create = shmem_create, |
| 2376 | .lookup = simple_lookup, |
| 2377 | .link = shmem_link, |
| 2378 | .unlink = shmem_unlink, |
| 2379 | .symlink = shmem_symlink, |
| 2380 | .mkdir = shmem_mkdir, |
| 2381 | .rmdir = shmem_rmdir, |
| 2382 | .mknod = shmem_mknod, |
| 2383 | .rename = shmem_rename, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2384 | #endif |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 2385 | #ifdef CONFIG_TMPFS_XATTR |
| 2386 | .setxattr = shmem_setxattr, |
| 2387 | .getxattr = shmem_getxattr, |
| 2388 | .listxattr = shmem_listxattr, |
| 2389 | .removexattr = shmem_removexattr, |
| 2390 | #endif |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2391 | #ifdef CONFIG_TMPFS_POSIX_ACL |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 2392 | .setattr = shmem_setattr, |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2393 | #endif |
| 2394 | }; |
| 2395 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 2396 | static const struct inode_operations shmem_special_inode_operations = { |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 2397 | #ifdef CONFIG_TMPFS_XATTR |
| 2398 | .setxattr = shmem_setxattr, |
| 2399 | .getxattr = shmem_getxattr, |
| 2400 | .listxattr = shmem_listxattr, |
| 2401 | .removexattr = shmem_removexattr, |
| 2402 | #endif |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2403 | #ifdef CONFIG_TMPFS_POSIX_ACL |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 2404 | .setattr = shmem_setattr, |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2405 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2406 | }; |
| 2407 | |
Hugh Dickins | 759b977 | 2007-03-05 00:30:28 -0800 | [diff] [blame] | 2408 | static const struct super_operations shmem_ops = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2409 | .alloc_inode = shmem_alloc_inode, |
| 2410 | .destroy_inode = shmem_destroy_inode, |
| 2411 | #ifdef CONFIG_TMPFS |
| 2412 | .statfs = shmem_statfs, |
| 2413 | .remount_fs = shmem_remount_fs, |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2414 | .show_options = shmem_show_options, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2415 | #endif |
Al Viro | 1f895f7 | 2010-06-05 19:10:41 -0400 | [diff] [blame] | 2416 | .evict_inode = shmem_evict_inode, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2417 | .drop_inode = generic_delete_inode, |
| 2418 | .put_super = shmem_put_super, |
| 2419 | }; |
| 2420 | |
Alexey Dobriyan | f0f37e2f | 2009-09-27 22:29:37 +0400 | [diff] [blame] | 2421 | static const struct vm_operations_struct shmem_vm_ops = { |
Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 2422 | .fault = shmem_fault, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2423 | #ifdef CONFIG_NUMA |
| 2424 | .set_policy = shmem_set_policy, |
| 2425 | .get_policy = shmem_get_policy, |
| 2426 | #endif |
| 2427 | }; |
| 2428 | |
Al Viro | 3c26ff6 | 2010-07-25 11:46:36 +0400 | [diff] [blame] | 2429 | static struct dentry *shmem_mount(struct file_system_type *fs_type, |
| 2430 | int flags, const char *dev_name, void *data) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2431 | { |
Al Viro | 3c26ff6 | 2010-07-25 11:46:36 +0400 | [diff] [blame] | 2432 | return mount_nodev(fs_type, flags, data, shmem_fill_super); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2433 | } |
| 2434 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2435 | static struct file_system_type shmem_fs_type = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2436 | .owner = THIS_MODULE, |
| 2437 | .name = "tmpfs", |
Al Viro | 3c26ff6 | 2010-07-25 11:46:36 +0400 | [diff] [blame] | 2438 | .mount = shmem_mount, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2439 | .kill_sb = kill_litter_super, |
| 2440 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2441 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2442 | int __init shmem_init(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2443 | { |
| 2444 | int error; |
| 2445 | |
Peter Zijlstra | e0bf68d | 2007-10-16 23:25:46 -0700 | [diff] [blame] | 2446 | error = bdi_init(&shmem_backing_dev_info); |
| 2447 | if (error) |
| 2448 | goto out4; |
| 2449 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2450 | error = shmem_init_inodecache(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2451 | if (error) |
| 2452 | goto out3; |
| 2453 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2454 | error = register_filesystem(&shmem_fs_type); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2455 | if (error) { |
| 2456 | printk(KERN_ERR "Could not register tmpfs\n"); |
| 2457 | goto out2; |
| 2458 | } |
Greg Kroah-Hartman | 95dc112 | 2005-06-20 21:15:16 -0700 | [diff] [blame] | 2459 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2460 | shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER, |
| 2461 | shmem_fs_type.name, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2462 | if (IS_ERR(shm_mnt)) { |
| 2463 | error = PTR_ERR(shm_mnt); |
| 2464 | printk(KERN_ERR "Could not kern_mount tmpfs\n"); |
| 2465 | goto out1; |
| 2466 | } |
| 2467 | return 0; |
| 2468 | |
| 2469 | out1: |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2470 | unregister_filesystem(&shmem_fs_type); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2471 | out2: |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2472 | shmem_destroy_inodecache(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2473 | out3: |
Peter Zijlstra | e0bf68d | 2007-10-16 23:25:46 -0700 | [diff] [blame] | 2474 | bdi_destroy(&shmem_backing_dev_info); |
| 2475 | out4: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2476 | shm_mnt = ERR_PTR(error); |
| 2477 | return error; |
| 2478 | } |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2479 | |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2480 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR |
| 2481 | /** |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2482 | * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2483 | * @inode: the inode to be searched |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2484 | * @index: the page offset to be searched |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2485 | * @pagep: the pointer for the found page to be stored |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2486 | * @swapp: the pointer for the found swap entry to be stored |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2487 | * |
| 2488 | * If a page is found, its refcount is incremented. The caller is |
| 2489 | * responsible for dropping that reference when done. |
| 2490 | */ |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2491 | void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index, |
| 2492 | struct page **pagep, swp_entry_t *swapp) |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2493 | { |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2494 | struct shmem_inode_info *info = SHMEM_I(inode); |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2495 | struct page *page = NULL; |
| 2496 | swp_entry_t swap = {0}; |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2497 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2498 | if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2499 | goto out; |
| 2500 | |
| 2501 | spin_lock(&info->lock); |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2502 | #ifdef CONFIG_SWAP |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2503 | swap = shmem_get_swap(info, index); |
| 2504 | if (swap.val) |
| 2505 | page = find_get_page(&swapper_space, swap.val); |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 2506 | else |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2507 | #endif |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2508 | page = find_get_page(inode->i_mapping, index); |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2509 | spin_unlock(&info->lock); |
| 2510 | out: |
| 2511 | *pagep = page; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2512 | *swapp = swap; |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2513 | } |
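/*
 * Illustrative caller sketch, not part of the original file: a user of
 * mem_cgroup_get_shmem_target() must drop the page reference the lookup
 * may have taken.  The function name is hypothetical.
 */
static inline void shmem_target_example(struct inode *inode, pgoff_t index)
{
	struct page *page;
	swp_entry_t swap;

	mem_cgroup_get_shmem_target(inode, index, &page, &swap);
	if (page) {
		/* inspect the in-memory page, then release the reference */
		put_page(page);
	} else if (swap.val) {
		/* the data lives in swap; swap identifies the entry */
	}
}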
| 2514 | #endif |
| 2515 | |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2516 | #else /* !CONFIG_SHMEM */ |
| 2517 | |
| 2518 | /* |
| 2519 | * tiny-shmem: simple shmemfs and tmpfs using ramfs code |
| 2520 | * |
| 2521 | * This is intended for small systems where the benefits of the full |
| 2522 | * shmem code (swap-backed and resource-limited) are outweighed by |
| 2523 | * its complexity. On systems without swap this code should be |
| 2524 | * effectively equivalent, but much lighter weight. |
| 2525 | */ |
| 2526 | |
| 2527 | #include <linux/ramfs.h> |
| 2528 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2529 | static struct file_system_type shmem_fs_type = { |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2530 | .name = "tmpfs", |
Al Viro | 3c26ff6 | 2010-07-25 11:46:36 +0400 | [diff] [blame] | 2531 | .mount = ramfs_mount, |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2532 | .kill_sb = kill_litter_super, |
| 2533 | }; |
| 2534 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2535 | int __init shmem_init(void) |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2536 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2537 | BUG_ON(register_filesystem(&shmem_fs_type) != 0); |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2538 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2539 | shm_mnt = kern_mount(&shmem_fs_type); |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2540 | BUG_ON(IS_ERR(shm_mnt)); |
| 2541 | |
| 2542 | return 0; |
| 2543 | } |
| 2544 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2545 | int shmem_unuse(swp_entry_t swap, struct page *page) |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2546 | { |
| 2547 | return 0; |
| 2548 | } |
| 2549 | |
Hugh Dickins | 3f96b79 | 2009-09-21 17:03:37 -0700 | [diff] [blame] | 2550 | int shmem_lock(struct file *file, int lock, struct user_struct *user) |
| 2551 | { |
| 2552 | return 0; |
| 2553 | } |
| 2554 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2555 | void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 2556 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2557 | truncate_inode_pages_range(inode->i_mapping, lstart, lend); |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 2558 | } |
| 2559 | EXPORT_SYMBOL_GPL(shmem_truncate_range); |
| 2560 | |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2561 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR |
| 2562 | /** |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2563 | * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2564 | * @inode: the inode to be searched |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2565 | * @index: the page offset to be searched |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2566 | * @pagep: the pointer for the found page to be stored |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2567 | * @swapp: the pointer for the found swap entry to be stored |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2568 | * |
| 2569 | * If a page is found, its refcount is incremented. The caller is |
| 2570 | * responsible for dropping that reference when done. |
| 2571 | */ |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2572 | void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index, |
| 2573 | struct page **pagep, swp_entry_t *swapp) |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2574 | { |
| 2575 | struct page *page = NULL; |
| 2576 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2577 | if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2578 | goto out; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2579 | page = find_get_page(inode->i_mapping, index); |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2580 | out: |
| 2581 | *pagep = page; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2582 | *swapp = (swp_entry_t){0}; |
Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2583 | } |
| 2584 | #endif |
| 2585 | |
Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 2586 | #define shmem_vm_ops generic_file_vm_ops |
| 2587 | #define shmem_file_operations ramfs_file_operations |
Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 2588 | #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) |
Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 2589 | #define shmem_acct_size(flags, size) 0 |
| 2590 | #define shmem_unacct_size(flags, size) do {} while (0) |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2591 | |
| 2592 | #endif /* CONFIG_SHMEM */ |
| 2593 | |
| 2594 | /* common code */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2595 | |
Randy Dunlap | 4671181 | 2008-03-19 17:00:41 -0700 | [diff] [blame] | 2596 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2597 | * shmem_file_setup - get an unlinked file living in tmpfs |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2598 | * @name: name for dentry (to be seen in /proc/<pid>/maps) |
| 2599 | * @size: size to be set for the file |
Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 2600 | * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2601 | */ |
Sergei Trofimovich | 168f5ac | 2009-06-16 15:33:02 -0700 | [diff] [blame] | 2602 | struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2603 | { |
| 2604 | int error; |
| 2605 | struct file *file; |
| 2606 | struct inode *inode; |
Al Viro | 2c48b9c | 2009-08-09 00:52:35 +0400 | [diff] [blame] | 2607 | struct path path; |
| 2608 | struct dentry *root; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2609 | struct qstr this; |
| 2610 | |
| 2611 | if (IS_ERR(shm_mnt)) |
| 2612 | return (void *)shm_mnt; |
| 2613 | |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 2614 | if (size < 0 || size > MAX_LFS_FILESIZE) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2615 | return ERR_PTR(-EINVAL); |
| 2616 | |
| 2617 | if (shmem_acct_size(flags, size)) |
| 2618 | return ERR_PTR(-ENOMEM); |
| 2619 | |
| 2620 | error = -ENOMEM; |
| 2621 | this.name = name; |
| 2622 | this.len = strlen(name); |
| 2623 | this.hash = 0; /* will go */ |
| 2624 | root = shm_mnt->mnt_root; |
Al Viro | 2c48b9c | 2009-08-09 00:52:35 +0400 | [diff] [blame] | 2625 | path.dentry = d_alloc(root, &this); |
| 2626 | if (!path.dentry) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2627 | goto put_memory; |
Al Viro | 2c48b9c | 2009-08-09 00:52:35 +0400 | [diff] [blame] | 2628 | path.mnt = mntget(shm_mnt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2629 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2630 | error = -ENOSPC; |
Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 2631 | inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2632 | if (!inode) |
Al Viro | 4b42af8 | 2009-08-05 18:25:56 +0400 | [diff] [blame] | 2633 | goto put_dentry; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2634 | |
Al Viro | 2c48b9c | 2009-08-09 00:52:35 +0400 | [diff] [blame] | 2635 | d_instantiate(path.dentry, inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2636 | inode->i_size = size; |
| 2637 | inode->i_nlink = 0; /* It is unlinked */ |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2638 | #ifndef CONFIG_MMU |
| 2639 | error = ramfs_nommu_expand_for_mapping(inode, size); |
| 2640 | if (error) |
Al Viro | 4b42af8 | 2009-08-05 18:25:56 +0400 | [diff] [blame] | 2641 | goto put_dentry; |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2642 | #endif |
Al Viro | 4b42af8 | 2009-08-05 18:25:56 +0400 | [diff] [blame] | 2643 | |
| 2644 | error = -ENFILE; |
Al Viro | 2c48b9c | 2009-08-09 00:52:35 +0400 | [diff] [blame] | 2645 | file = alloc_file(&path, FMODE_WRITE | FMODE_READ, |
Al Viro | 4b42af8 | 2009-08-05 18:25:56 +0400 | [diff] [blame] | 2646 | &shmem_file_operations); |
| 2647 | if (!file) |
| 2648 | goto put_dentry; |
| 2649 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2650 | return file; |
| 2651 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2652 | put_dentry: |
Al Viro | 2c48b9c | 2009-08-09 00:52:35 +0400 | [diff] [blame] | 2653 | path_put(&path); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2654 | put_memory: |
| 2655 | shmem_unacct_size(flags, size); |
| 2656 | return ERR_PTR(error); |
| 2657 | } |
Keith Packard | 395e0dd | 2008-06-20 00:08:06 -0700 | [diff] [blame] | 2658 | EXPORT_SYMBOL_GPL(shmem_file_setup); |
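/*
 * Illustrative kernel-context sketch, not part of the original file: a
 * typical caller of shmem_file_setup() creates an unlinked tmpfs file,
 * works through its address_space or maps it, and finally drops it with
 * fput(); the last fput() frees the unlinked inode.  The function name
 * is hypothetical.
 */
static inline struct file *shmem_setup_example(loff_t size)
{
	struct file *filp;

	filp = shmem_file_setup("example-object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return filp;
	/* ... read/write via filp->f_mapping, or map it into a mm ... */
	return filp;	/* the caller releases it with fput(filp) */
}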
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2659 | |
Randy Dunlap | 4671181 | 2008-03-19 17:00:41 -0700 | [diff] [blame] | 2660 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2661 | * shmem_zero_setup - setup a shared anonymous mapping |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2662 | * @vma: the vma to be mmapped is prepared by do_mmap_pgoff |
| 2663 | */ |
| 2664 | int shmem_zero_setup(struct vm_area_struct *vma) |
| 2665 | { |
| 2666 | struct file *file; |
| 2667 | loff_t size = vma->vm_end - vma->vm_start; |
| 2668 | |
| 2669 | file = shmem_file_setup("dev/zero", size, vma->vm_flags); |
| 2670 | if (IS_ERR(file)) |
| 2671 | return PTR_ERR(file); |
| 2672 | |
| 2673 | if (vma->vm_file) |
| 2674 | fput(vma->vm_file); |
| 2675 | vma->vm_file = file; |
| 2676 | vma->vm_ops = &shmem_vm_ops; |
Hugh Dickins | bee4c36a | 2011-03-22 16:33:43 -0700 | [diff] [blame] | 2677 | vma->vm_flags |= VM_CAN_NONLINEAR; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2678 | return 0; |
| 2679 | } |
Hugh Dickins | d9d90e5 | 2011-06-27 16:18:04 -0700 | [diff] [blame] | 2680 | |
| 2681 | /** |
| 2682 | * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags. |
| 2683 | * @mapping: the page's address_space |
| 2684 | * @index: the page index |
| 2685 | * @gfp: the page allocator flags to use if allocating |
| 2686 | * |
| 2687 | * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", |
| 2688 | * with any new page allocations done using the specified allocation flags. |
| 2689 | * But read_cache_page_gfp() uses the ->readpage() method, which does not |
| 2690 | * suit tmpfs, since it may have pages in swapcache, and needs to find those |
| 2691 | * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. |
| 2692 | * |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 2693 | * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in |
| 2694 | * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily. |
Hugh Dickins | d9d90e5 | 2011-06-27 16:18:04 -0700 | [diff] [blame] | 2695 | */ |
| 2696 | struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, |
| 2697 | pgoff_t index, gfp_t gfp) |
| 2698 | { |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 2699 | #ifdef CONFIG_SHMEM |
| 2700 | struct inode *inode = mapping->host; |
Hugh Dickins | 9276aad | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 2701 | struct page *page; |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 2702 | int error; |
| 2703 | |
| 2704 | BUG_ON(mapping->a_ops != &shmem_aops); |
| 2705 | error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL); |
| 2706 | if (error) |
| 2707 | page = ERR_PTR(error); |
| 2708 | else |
| 2709 | unlock_page(page); |
| 2710 | return page; |
| 2711 | #else |
| 2712 | /* |
| 2713 | * The tiny !SHMEM case uses ramfs without swap |
| 2714 | */ |
Hugh Dickins | d9d90e5 | 2011-06-27 16:18:04 -0700 | [diff] [blame] | 2715 | return read_cache_page_gfp(mapping, index, gfp); |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 2716 | #endif |
Hugh Dickins | d9d90e5 | 2011-06-27 16:18:04 -0700 | [diff] [blame] | 2717 | } |
| 2718 | EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); |
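/*
 * Illustrative sketch, not part of the original file: a driver such as
 * i915 or ttm is expected to read tmpfs-backed pages through this helper,
 * typically with mapping_gfp_mask() (optionally mixed with __GFP_NORETRY |
 * __GFP_NOWARN, as noted above), and to drop the reference when done.
 * The function name is hypothetical.
 */
static inline int shmem_read_page_example(struct address_space *mapping,
					  pgoff_t index)
{
	struct page *page;

	page = shmem_read_mapping_page_gfp(mapping, index,
					   mapping_gfp_mask(mapping));
	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... use the page contents ... */
	page_cache_release(page);	/* drop the reference taken above */
	return 0;
}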