/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SHMEM_FS_H
#define __SHMEM_FS_H

#include <linux/file.h>
#include <linux/swap.h>
#include <linux/mempolicy.h>
#include <linux/pagemap.h>
#include <linux/percpu_counter.h>
#include <linux/xattr.h>
#include <linux/fs_parser.h>

/* inode in-kernel data */

struct shmem_inode_info {
	spinlock_t		lock;
	unsigned int		seals;		/* shmem seals */
	unsigned long		flags;
	unsigned long		alloced;	/* data pages alloced to file */
	unsigned long		swapped;	/* subtotal assigned to swap */
	struct list_head	shrinklist;	/* shrinkable hpage inodes */
	struct list_head	swaplist;	/* chain of maybes on swap */
	struct shared_policy	policy;		/* NUMA memory alloc policy */
	struct simple_xattrs	xattrs;		/* list of xattrs */
	atomic_t		stop_eviction;	/* hold when working on inode */
	struct inode		vfs_inode;
};

struct shmem_sb_info {
	unsigned long max_blocks;   /* How many blocks are allowed */
	struct percpu_counter used_blocks;  /* How many are allocated */
	unsigned long max_inodes;   /* How many inodes are allowed */
	unsigned long free_inodes;  /* How many are left for allocation */
	spinlock_t stat_lock;	    /* Serialize shmem_sb_info changes */
	umode_t mode;		    /* Mount mode for root directory */
	unsigned char huge;	    /* Whether to try for hugepages */
	kuid_t uid;		    /* Mount uid for root directory */
	kgid_t gid;		    /* Mount gid for root directory */
	bool full_inums;	    /* If i_ino should be uint or ino_t */
	ino_t next_ino;		    /* The next per-sb inode number to use */
	ino_t __percpu *ino_batch;  /* The next per-cpu inode number to use */
	struct mempolicy *mpol;     /* default memory policy for mappings */
	spinlock_t shrinklist_lock;   /* Protects shrinklist */
	struct list_head shrinklist;  /* List of shrinkable inodes */
	unsigned long shrinklist_len; /* Length of shrinklist */
};

static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
{
	return container_of(inode, struct shmem_inode_info, vfs_inode);
}

/*
 * Functions in mm/shmem.c called directly from elsewhere:
 */
extern const struct fs_parameter_spec shmem_fs_parameters[];
extern int shmem_init(void);
extern int shmem_init_fs_context(struct fs_context *fc);
extern struct file *shmem_file_setup(const char *name,
					loff_t size, unsigned long flags);
extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
					    unsigned long flags);
extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt,
		const char *name, loff_t size, unsigned long flags);
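
/*
 * Illustrative use of shmem_file_setup() (a sketch, not part of this
 * header): a kernel user that wants an unlinked, tmpfs-backed file can
 * create one and release it with fput() when done.  "obj_size" below is
 * a hypothetical value, not a real symbol.
 *
 *	struct file *filp;
 *
 *	filp = shmem_file_setup("my-object", obj_size, VM_NORESERVE);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	... use filp->f_mapping, mmap it, or install it in an fd table ...
 *	fput(filp);
 */
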
extern int shmem_zero_setup(struct vm_area_struct *);
extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);
extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
#ifdef CONFIG_SHMEM
extern bool shmem_mapping(struct address_space *mapping);
#else
static inline bool shmem_mapping(struct address_space *mapping)
{
	return false;
}
#endif /* CONFIG_SHMEM */
extern void shmem_unlock_mapping(struct address_space *mapping);
extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
extern int shmem_unuse(unsigned int type, bool frontswap,
		       unsigned long *fs_pages_to_unuse);

extern bool shmem_huge_enabled(struct vm_area_struct *vma);
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end);
extern void shmem_mark_page_lazyfree(struct page *page, bool tail);

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_NOHUGE,	/* like SGP_CACHE, but no huge pages */
	SGP_HUGE,	/* like SGP_CACHE, huge pages preferred */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

extern int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp);

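/*
 * Illustrative use of shmem_getpage() (a sketch, not normative): look up
 * or allocate the page at @index in the cache and release it when done.
 * The exact locking/refcount contract is defined by mm/shmem.c; as used
 * by in-tree callers the page comes back locked with a reference held.
 *
 *	struct page *page;
 *	int err;
 *
 *	err = shmem_getpage(inode, index, &page, SGP_CACHE);
 *	if (err)
 *		return err;
 *	... copy to/from the page ...
 *	unlock_page(page);
 *	put_page(page);
 */
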
static inline struct page *shmem_read_mapping_page(
				struct address_space *mapping, pgoff_t index)
{
	return shmem_read_mapping_page_gfp(mapping, index,
					mapping_gfp_mask(mapping));
}
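
/*
 * Typical driver-side use of shmem_read_mapping_page() (a sketch): read
 * or allocate the page at @index through the page cache and drop the
 * reference when done.  Errors are returned as ERR_PTR() values.
 *
 *	struct page *page;
 *
 *	page = shmem_read_mapping_page(file->f_mapping, index);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... access the page contents ...
 *	put_page(page);
 */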

static inline bool shmem_file(struct file *file)
{
	if (!IS_ENABLED(CONFIG_SHMEM))
		return false;
	if (!file || !file->f_mapping)
		return false;
	return shmem_mapping(file->f_mapping);
}

extern bool shmem_charge(struct inode *inode, long pages);
extern void shmem_uncharge(struct inode *inode, long pages);

#ifdef CONFIG_USERFAULTFD
#ifdef CONFIG_SHMEM
extern int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
				  struct vm_area_struct *dst_vma,
				  unsigned long dst_addr,
				  unsigned long src_addr,
				  bool zeropage,
				  struct page **pagep);
#else /* !CONFIG_SHMEM */
#define shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, \
			       src_addr, zeropage, pagep) ({ BUG(); 0; })
#endif /* CONFIG_SHMEM */
#endif /* CONFIG_USERFAULTFD */

#endif