/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 2^27
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
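
/*
 * Worked example (illustrative note, not part of the original header;
 * assumes 4 KiB pages): a 27-bit swap offset addresses 2^27 page slots,
 * i.e. up to 512 GiB per swap area on an architecture that packs the pte
 * as 5 bits of type / 27 bits of offset.
 */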

/*
 * Use some of the swap file numbers for other purposes.  This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * Unaddressable device memory support.  See include/linux/hmm.h and
 * Documentation/vm/hmm.rst.  The short description is that we need struct
 * pages for device memory that is unaddressable (inaccessible) by the CPU,
 * so that we can migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table
 * entry to a special SWP_DEVICE_* entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 2
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
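
/*
 * Worked example (illustrative note, not part of the original header):
 * with CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE enabled but
 * CONFIG_DEVICE_PRIVATE disabled, MAX_SWAPFILES = (1 << 5) - 0 - 2 - 1
 * = 29, leaving type 29 for SWP_HWPOISON and types 30/31 for
 * SWP_MIGRATION_READ/SWP_MIGRATION_WRITE.
 */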

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
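
/*
 * Minimal sketch (illustrative; the real detection logic lives in
 * mm/swapfile.c): the magic sits in the last ten bytes of the header
 * page, so after reading the first page of a candidate swap area one
 * would check it roughly like:
 *
 *	union swap_header *hdr = page_address(page);
 *
 *	if (!memcmp(hdr->magic.magic, "SWAPSPACE2", 10))
 *		... treat it as a new-style swap area ...
 */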

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format.
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))
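
/*
 * Worked example (illustrative note, assuming 4 KiB pages):
 * magic.magic starts at byte PAGE_SIZE - 10 = 4086 and info.badpages
 * starts at byte 1536 (1024 + 4 + 4 + 4 + 16 + 16 + 117 * 4), so
 * MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637.
 */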

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS		= (1 << 8),	/* swap file goes through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* must not overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
	SWP_VALID	= (1 << 13),	/* swap is valid to be operated on? */
					/* add others here before... */
	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flags in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special values in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */
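
/*
 * Illustrative summary (not part of the original header): each swap_map
 * byte holds an entry's use count in its low six bits (up to
 * SWAP_MAP_MAX = 0x3e, with 0x3f marking a bad slot), plus
 * SWAP_HAS_CACHE (0x40) and COUNT_CONTINUED (0x80) as flags.  Counts
 * that exceed SWAP_MAP_MAX spill into continuation pages, each byte of
 * which counts up to SWAP_CONT_MAX = 0x7f.
 */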

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long that is naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and the swap_info_struct->swap_map
				 * elements corresponding to the swap
				 * cluster
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
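
/*
 * Illustrative capacity note (assumes SWAPFILE_CLUSTER is 256, its
 * value in mm/swapfile.c, and 4 KiB pages): the 24-bit data field can
 * index 2^24 clusters, i.e. 2^24 * 256 * 4 KiB = 16 TiB of swap space
 * per device.
 */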

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize
 * swapout throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. Other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. Changing flags needs
					 * to hold this lock and swap_lock. If
					 * both locks must be held, take
					 * swap_lock first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[]; /*
					  * entries in swap_avail_heads, one
					  * entry per node.
					  * Must be last as the length of the
					  * array is nr_node_ids, which is not
					  * a fixed value, so it has to be
					  * allocated dynamically.
					  * And it has to be an array so that
					  * plist_for_each_* can work.
					  */
};
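
/*
 * Allocation sketch (illustrative; the real allocation site is in
 * mm/swapfile.c): because avail_lists[] is a flexible array member
 * sized by nr_node_ids, the structure would be allocated along the
 * lines of:
 *
 *	struct swap_info_struct *p;
 *
 *	p = kvzalloc(struct_size(p, avail_lists, nr_node_ids),
 *		     GFP_KERNEL);
 */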

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING	5
#else
/* Avoid stack overflow, because we need to save part of the page table */
#define SWAP_RA_ORDER_CEILING	3
#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
#ifdef CONFIG_64BIT
	pte_t *ptes;
#else
	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

/* linux/mm/workingset.c */
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
#define mapping_set_update(xas, mapping) do {				\
	if (!dax_mapping(mapping) && !shmem_mapping(mapping))		\
		xas_set_update(xas, workingset_update_node);		\
} while (0)
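
/*
 * Minimal usage sketch (illustrative; mirrors how the page cache code
 * uses this macro): wire up the workingset callback on an XA_STATE
 * before storing into the mapping's XArray (under the mapping's
 * xa_lock):
 *
 *	XA_STATE(xas, &mapping->i_pages, index);
 *
 *	mapping_set_update(&xas, mapping);
 *	xas_store(&xas, page);
 */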

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void lru_note_cost(struct lruvec *lruvec, bool file,
			  unsigned int nr_pages);
extern void lru_note_cost_page(struct page *);
extern void lru_cache_add(struct page *);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			      struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

extern void check_move_unevictable_pages(struct pagevec *pvec);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])
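
/*
 * Worked example (illustrative note, assuming 4 KiB pages): each
 * swapper address space covers 1 << 14 = 16384 swap slots, i.e. 64 MiB
 * of swap, so a 1 GiB swap device is split across 16 address spaces.
 */
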
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern void __delete_from_swap_cache(struct page *, swp_entry_t entry);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
				      struct vm_area_struct *vma,
				      unsigned long addr);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated);
extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
				struct vm_fault *vmf);
extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
				struct vm_fault *vmf);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively... */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);

static inline void put_swap_device(struct swap_info_struct *si)
{
	rcu_read_unlock();
}
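
/*
 * Minimal usage sketch (illustrative; mirrors callers in mm/):
 * get_swap_device() pins the device against a concurrent swapoff and
 * returns NULL if the entry is no longer valid:
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *
 *	if (si) {
 *		... safely use si and the entry ...
 *		put_swap_device(si);
 *	}
 */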

#else /* CONFIG_SWAP */

static inline int swap_readpage(struct page *page, bool do_poll)
{
	return 0;
}

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr))

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swap_cluster_readahead(swp_entry_t entry,
				gfp_t gfp_mask, struct vm_fault *vmf)
{
	return NULL;
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_fault *vmf)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
					     struct vm_area_struct *vma,
					     unsigned long addr)
{
	return NULL;
}

static inline int add_to_swap(struct page *page)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page,
							swp_entry_t entry)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

#define reuse_swap_page(page, total_map_swapcount) \
	(page_trans_huge_mapcount(page, total_map_swapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root ? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return vm_swappiness;

	return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
#else
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */