#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;
#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into ptes and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 2^27
 * on architectures with a 32-bit pgoff_t, and assumes that the
 * architecture packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
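
/*
 * Illustrative sketch, not part of this header's API: one way the 5-bit
 * type / remaining-bits offset split described above can be packed into
 * an unsigned long.  The real encoding lives in <linux/swapops.h> and in
 * per-architecture pte code; the helper names here are hypothetical.
 */
static inline unsigned long swp_example_pack(unsigned int type,
					     unsigned long offset)
{
	/* type occupies the top MAX_SWAPFILES_SHIFT bits */
	return ((unsigned long)type <<
		(sizeof(unsigned long) * 8 - MAX_SWAPFILES_SHIFT)) | offset;
}

static inline unsigned int swp_example_type(unsigned long val)
{
	return val >> (sizeof(unsigned long) * 8 - MAX_SWAPFILES_SHIFT);
}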

/*
 * Use some of the swap file numbers for other purposes.  This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
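
/*
 * Worked example: with CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE both
 * enabled, MAX_SWAPFILES = (1 << 5) - 2 - 1 = 29, so entry types 0-28
 * address real swap files, type 29 is SWP_HWPOISON, and types 30/31 are
 * SWP_MIGRATION_READ/SWP_MIGRATION_WRITE.
 */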

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
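
/*
 * Illustrative sketch (hypothetical helper, not the kernel's swapon
 * code): given the layout above, detecting a swap signature means
 * looking at the last ten bytes of the first page.  Assumes memcmp()
 * is visible, e.g. via <linux/string.h>.
 */
static inline int swap_header_example_valid(const union swap_header *hdr)
{
	return !memcmp(hdr->magic.magic, "SWAPSPACE2", 10) ||
	       !memcmp(hdr->magic.magic, "SWAP-SPACE", 10);
}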

/*
 * A swap entry has to fit into an "unsigned long", as the entry
 * is hidden in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim.
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;
/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Here the
 * term `swapfile' refers to either a blockdevice or an IS_REG file; apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};
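
/*
 * Illustrative sketch (hypothetical helper, not the kernel's
 * map_swap_page()): translating a swapfile page offset into a disk
 * block by walking an extent list, per the description above.
 */
static inline sector_t swap_extent_example_lookup(struct list_head *extents,
						  pgoff_t page)
{
	struct swap_extent *se;

	list_for_each_entry(se, extents, list) {
		if (page >= se->start_page &&
		    page < se->start_page + se->nr_pages)
			return se->start_block + (page - se->start_page);
	}
	return 0;	/* offset not covered by any extent */
}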

/*
 * Max bad pages in the new format.
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
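
/*
 * Worked example, assuming 4KiB pages: __swapoffset(magic.magic) is
 * PAGE_SIZE - 10 = 4086 and __swapoffset(info.badpages) is
 * 1024 + 3*4 + 2*16 + 117*4 = 1536, giving
 * MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637.
 */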

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
					/* add others here before... */
	SWP_SCANNING	= (1 << 10),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/*
 * Ratio between the present memory in the zone and the "gap" that we
 * allow kswapd to shrink in addition to the per-zone high wmark, even
 * for zones that already have the high wmark satisfied, in order to
 * provide better per-zone lru behavior.  We are willing to spend no
 * more than 1% of the zone's memory on this balancing "gap".
 */
#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
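
/*
 * Illustrative sketch (hypothetical helpers): how a swap_map byte using
 * the flags above decomposes.  The kernel's own accessors live in
 * mm/swapfile.c.
 */
static inline int swap_map_example_has_cache(unsigned char ent)
{
	return !!(ent & SWAP_HAS_CACHE);
}

static inline int swap_map_example_count(unsigned char ent)
{
	/* duplication count held in this slot, flags masked off */
	return ent & ~(SWAP_HAS_CACHE | COUNT_CONTINUED);
}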

/*
 * We use this to track usage of a cluster.  A cluster is a block of swap
 * disk space SWAPFILE_CLUSTER pages long, naturally aligned on disk.  All
 * free clusters are organized into a list; we fetch an entry from the
 * list to get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise.  The flags field determines whether a
 * cluster is free.  This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
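
/*
 * Illustrative sketch (hypothetical helpers): reading the packed word
 * above.  When a cluster is free, "data" links to the next free
 * cluster; otherwise it counts in-use pages.
 */
static inline int cluster_example_is_free(struct swap_cluster_info *info)
{
	return info->flags & CLUSTER_FLAG_FREE;
}

static inline unsigned int cluster_example_data(struct swap_cluster_info *info)
{
	return info->data;
}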

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries
 * from its own cluster and swap out sequentially.  The purpose is to
 * optimize swapout throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	signed char	type;		/* strange name for an index */
	signed char	next;		/* next type on the swap list */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_info free_cluster_head; /* free cluster list head */
	struct swap_cluster_info free_cluster_tail; /* free cluster list tail */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protects map-scan related fields:
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, and the free/discard
					 * cluster lists.  Other fields are
					 * changed only at swapon/swapoff, so
					 * are protected by swap_lock.
					 * Changing flags requires holding
					 * both this lock and swap_lock; when
					 * both are needed, take swap_lock
					 * first.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_info discard_cluster_head; /* list head of discard clusters */
	struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
};

struct swap_list_t {
	int head;	/* head of priority-ordered swapfile list */
	int next;	/* swapfile to be used next */
};

/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
extern struct list_lru workingset_shadow_nodes;

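/*
 * The helpers below assume radix_tree_node->count packs two counters
 * (see lib/radix-tree.c): the low RADIX_TREE_COUNT_SHIFT bits count
 * resident pages, and the bits above them count shadow (workingset)
 * entries stored in the node.
 */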
static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
{
	return node->count & RADIX_TREE_COUNT_MASK;
}

static inline void workingset_node_pages_inc(struct radix_tree_node *node)
{
	node->count++;
}

static inline void workingset_node_pages_dec(struct radix_tree_node *node)
{
	node->count--;
}

static inline unsigned int workingset_node_shadows(struct radix_tree_node *node)
{
	return node->count >> RADIX_TREE_COUNT_SHIFT;
}

static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
{
	node->count += 1U << RADIX_TREE_COUNT_SHIFT;
}

static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
{
	node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
}

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long dirty_balance_reserve;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
extern void __lru_cache_add(struct page *);
extern void lru_cache_add(struct page *);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			 struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

/**
 * lru_cache_add_anon - add a page to the page lists
 * @page: the page to add
 */
static inline void lru_cache_add_anon(struct page *page)
{
	ClearPageActive(page);
	__lru_cache_add(page);
}

static inline void lru_cache_add_file(struct page *page)
{
	ClearPageActive(page);
	__lru_cache_add(page);
}

/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
						  gfp_t gfp_mask, bool noswap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						struct zone *zone,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
{
	return 0;
}
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern unsigned long scan_unevictable_pages;
extern int scan_unevictable_handler(struct ctl_table *, int,
				    void __user *, size_t *, loff_t *);
#ifdef CONFIG_NUMA
extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
#else
static inline int scan_unevictable_register_node(struct node *node)
{
	return 0;
}
static inline void scan_unevictable_unregister_node(struct node *node)
{
}
#endif

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_MEMCG
extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif
#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
#else
static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
}
#endif
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio, int err);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	void (*end_write_func)(struct bio *, int));
extern int swap_set_page_dirty(struct page *page);
extern void end_swap_bio_read(struct bio *bio, int err);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
extern struct address_space swapper_spaces[];
#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *, struct list_head *list);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;

/* Swap 50% full?  Release swapcache more aggressively. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern struct swap_info_struct *page_swap_info(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;

#ifdef CONFIG_MEMCG
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
#else
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}
#endif
#else /* CONFIG_SWAP */

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc can't include linux/pagemap.h in this file,
 * so leave page_cache_release and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	page_cache_release(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), 0);

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void swapcache_free(swp_entry_t swp, struct page *page)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
	return NULL;
}

static inline int add_to_swap(struct page *page, struct list_head *list)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

#define reuse_swap_page(page)	(page_mapcount(page) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(void)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}

#endif /* CONFIG_SWAP */
#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */