// SPDX-License-Identifier: GPL-2.0-only
/*
 * Frontswap frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of frontswap.  See
 * Documentation/vm/frontswap.rst for more information.
 *
 * Copyright (C) 2009-2012 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>

DEFINE_STATIC_KEY_FALSE(frontswap_enabled_key);

/*
 * frontswap_ops are added by frontswap_register_ops, and provide the
 * frontswap "backend" implementation functions.  Multiple implementations
 * may be registered, but implementations can never deregister.  This
 * is a simple singly-linked list of all registered implementations.
 */
static struct frontswap_ops *frontswap_ops __read_mostly;

#define for_each_frontswap_ops(ops)		\
	for ((ops) = frontswap_ops; (ops); (ops) = (ops)->next)

/*
 * If enabled, frontswap_store will return failure even on success.  As
 * a result, the swap subsystem will always write the page to swap, in
 * effect converting frontswap into a writethrough cache.  In this mode,
 * there is no direct reduction in swap writes, but a frontswap backend
 * can unilaterally "reclaim" any pages in use with no data loss, thus
 * providing increased control over maximum memory usage due to frontswap.
 */
static bool frontswap_writethrough_enabled __read_mostly;

/*
 * If enabled, the underlying tmem implementation is capable of doing
 * exclusive gets, so frontswap_load, on a successful tmem_get must
 * mark the page as no longer in frontswap AND mark it dirty.
 */
static bool frontswap_tmem_exclusive_gets_enabled __read_mostly;

#ifdef CONFIG_DEBUG_FS
/*
 * Counters available via /sys/kernel/debug/frontswap (if debugfs is
 * properly configured).  These are for information only so are not protected
 * against increment races.
 */
static u64 frontswap_loads;
static u64 frontswap_succ_stores;
static u64 frontswap_failed_stores;
static u64 frontswap_invalidates;

static inline void inc_frontswap_loads(void)
{
	data_race(frontswap_loads++);
}
static inline void inc_frontswap_succ_stores(void)
{
	data_race(frontswap_succ_stores++);
}
static inline void inc_frontswap_failed_stores(void)
{
	data_race(frontswap_failed_stores++);
}
static inline void inc_frontswap_invalidates(void)
{
	data_race(frontswap_invalidates++);
}
#else
static inline void inc_frontswap_loads(void) { }
static inline void inc_frontswap_succ_stores(void) { }
static inline void inc_frontswap_failed_stores(void) { }
static inline void inc_frontswap_invalidates(void) { }
#endif
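
/*
 * With debugfs mounted at the usual /sys/kernel/debug, the counters
 * above can be read from userspace, e.g.:
 *
 *	# cat /sys/kernel/debug/frontswap/succ_stores
 */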

/*
 * Due to the asynchronous nature of the backends loading potentially
 * _after_ the swap system has been activated, we have chokepoints
 * on all frontswap functions to not call the backend until the backend
 * has registered.
 *
 * This would not guard us against the user deciding to call swapoff right
 * as we are calling the backend to initialize (so swapon is in action).
 * Fortunately for us, the swapon_mutex has been taken by the callee so we are
 * OK. The other scenario, where calls to frontswap_store (called via
 * swap_writepage) race with frontswap_invalidate_area (called via
 * swapoff), is again guarded by the swap subsystem.
 *
 * While no backend is registered all calls to frontswap_[store|load|
 * invalidate_area|invalidate_page] are ignored or fail.
 *
 * The time between the backend being registered and the swap file system
 * calling the backend (via the frontswap_* functions) is indeterminate as
 * frontswap_ops is not atomic_t (or a value guarded by a spinlock).
 * That is OK as we are comfortable missing some of these calls to the newly
 * registered backend.
 *
 * Obviously the opposite (unloading the backend) must be done after all
 * the frontswap_[store|load|invalidate_area|invalidate_page] calls start
 * ignoring or failing the requests.  However, there is currently no way
 * to unload a backend once it is registered.
 */
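
/*
 * The chokepoint itself lives in the inline wrappers in
 * <linux/frontswap.h>; as a rough sketch (built on the static key
 * defined above), the store wrapper looks approximately like:
 *
 *	static inline int frontswap_store(struct page *page)
 *	{
 *		if (frontswap_enabled())
 *			return __frontswap_store(page);
 *		return -1;
 *	}
 */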

/*
 * Register operations for frontswap
 */
void frontswap_register_ops(struct frontswap_ops *ops)
{
	DECLARE_BITMAP(a, MAX_SWAPFILES);
	DECLARE_BITMAP(b, MAX_SWAPFILES);
	struct swap_info_struct *si;
	unsigned int i;

	bitmap_zero(a, MAX_SWAPFILES);
	bitmap_zero(b, MAX_SWAPFILES);

	spin_lock(&swap_lock);
	plist_for_each_entry(si, &swap_active_head, list) {
		if (!WARN_ON(!si->frontswap_map))
			set_bit(si->type, a);
	}
	spin_unlock(&swap_lock);

	/* the new ops needs to know the currently active swap devices */
	for_each_set_bit(i, a, MAX_SWAPFILES)
		ops->init(i);

	/*
	 * Setting frontswap_ops must happen after the ops->init() calls
	 * above; cmpxchg implies smp_mb() which will ensure the init is
	 * complete at this point.
	 */
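	/* lock-free push onto the ops list; retry if another register races */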
	do {
		ops->next = frontswap_ops;
	} while (cmpxchg(&frontswap_ops, ops->next, ops) != ops->next);

	static_branch_inc(&frontswap_enabled_key);

	spin_lock(&swap_lock);
	plist_for_each_entry(si, &swap_active_head, list) {
		if (si->frontswap_map)
			set_bit(si->type, b);
	}
	spin_unlock(&swap_lock);

	/*
	 * On the very unlikely chance that a swap device was added or
	 * removed between setting the "a" list bits and the ops init
	 * calls, we re-check and do init or invalidate for any changed
	 * bits.
	 */
	if (unlikely(!bitmap_equal(a, b, MAX_SWAPFILES))) {
		for (i = 0; i < MAX_SWAPFILES; i++) {
			if (!test_bit(i, a) && test_bit(i, b))
				ops->init(i);
			else if (test_bit(i, a) && !test_bit(i, b))
				ops->invalidate_area(i);
		}
	}
}
EXPORT_SYMBOL(frontswap_register_ops);
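
/*
 * Backend side, for illustration (a minimal sketch, not part of this
 * file; the my_* names are hypothetical, the members are the struct
 * frontswap_ops callbacks invoked throughout this file):
 *
 *	static struct frontswap_ops my_ops = {
 *		.init		 = my_init,
 *		.store		 = my_store,
 *		.load		 = my_load,
 *		.invalidate_page = my_invalidate_page,
 *		.invalidate_area = my_invalidate_area,
 *	};
 *
 *	frontswap_register_ops(&my_ops);
 */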

/*
 * Enable/disable frontswap writethrough (see above).
 */
void frontswap_writethrough(bool enable)
{
	frontswap_writethrough_enabled = enable;
}
EXPORT_SYMBOL(frontswap_writethrough);
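
/*
 * A backend that can discard its copies at any time (a hypothetical
 * example of the writethrough mode described above) would simply call:
 *
 *	frontswap_writethrough(true);
 */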

/*
 * Enable/disable frontswap exclusive gets (see above).
 */
void frontswap_tmem_exclusive_gets(bool enable)
{
	frontswap_tmem_exclusive_gets_enabled = enable;
}
EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);

/*
 * Called when a swap device is swapon'd.
 */
void __frontswap_init(unsigned type, unsigned long *map)
{
	struct swap_info_struct *sis = swap_info[type];
	struct frontswap_ops *ops;

	VM_BUG_ON(sis == NULL);

	/*
	 * p->frontswap is a bitmap that we MUST have to figure out which page
	 * has gone into frontswap.  Without it there is no point in continuing.
	 */
	if (WARN_ON(!map))
		return;
	/*
	 * Regardless of whether the frontswap backend has been loaded
	 * before this function or it will be later, we _MUST_ have the
	 * p->frontswap set to something valid to work properly.
	 */
	frontswap_map_set(sis, map);

	for_each_frontswap_ops(ops)
		ops->init(type);
}
EXPORT_SYMBOL(__frontswap_init);
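
/*
 * For context, a rough sketch of the caller side: the swapon path in
 * mm/swapfile.c allocates the bitmap and hands it in through the
 * frontswap_init() wrapper, approximately:
 *
 *	frontswap_map = kvzalloc(BITS_TO_LONGS(maxpages) * sizeof(long),
 *				 GFP_KERNEL);
 *	...
 *	frontswap_init(p->type, frontswap_map);
 */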

bool __frontswap_test(struct swap_info_struct *sis,
				pgoff_t offset)
{
	if (sis->frontswap_map)
		return test_bit(offset, sis->frontswap_map);
	return false;
}
EXPORT_SYMBOL(__frontswap_test);

static inline void __frontswap_set(struct swap_info_struct *sis,
				   pgoff_t offset)
{
	set_bit(offset, sis->frontswap_map);
	atomic_inc(&sis->frontswap_pages);
}

static inline void __frontswap_clear(struct swap_info_struct *sis,
				     pgoff_t offset)
{
	clear_bit(offset, sis->frontswap_map);
	atomic_dec(&sis->frontswap_pages);
}

/*
 * "Store" data from a page to frontswap and associate it with the page's
 * swaptype and offset.  Page must be locked and in the swap cache.
 * If frontswap already contains a page with matching swaptype and
 * offset, the frontswap implementation may either overwrite the data and
 * return success or invalidate the page from frontswap and return failure.
 */
int __frontswap_store(struct page *page)
{
	int ret = -1;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);
	struct frontswap_ops *ops;

	VM_BUG_ON(!frontswap_ops);
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(sis == NULL);

	/*
	 * If a dup, we must remove the old page first; we can't leave the
	 * old page no matter if the store of the new page succeeds or fails,
	 * and we can't rely on the new page replacing the old page as we may
	 * not store to the same implementation that contains the old page.
	 */
	if (__frontswap_test(sis, offset)) {
		__frontswap_clear(sis, offset);
		for_each_frontswap_ops(ops)
			ops->invalidate_page(type, offset);
	}

	/* Try to store in each implementation, until one succeeds. */
	for_each_frontswap_ops(ops) {
		ret = ops->store(type, offset, page);
		if (!ret) /* successful store */
			break;
	}
	if (ret == 0) {
		__frontswap_set(sis, offset);
		inc_frontswap_succ_stores();
	} else {
		inc_frontswap_failed_stores();
	}
	if (frontswap_writethrough_enabled)
		/* report failure so swap also writes to swap device */
		ret = -1;
	return ret;
}
EXPORT_SYMBOL(__frontswap_store);

/*
 * "Get" data from frontswap associated with swaptype and offset that were
 * specified when the data was put to frontswap and use it to fill the
 * specified page with data.  Page must be locked and in the swap cache.
 */
int __frontswap_load(struct page *page)
{
	int ret = -1;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);
	struct frontswap_ops *ops;

	VM_BUG_ON(!frontswap_ops);
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(sis == NULL);

	if (!__frontswap_test(sis, offset))
		return -1;

	/* Try loading from each implementation, until one succeeds. */
	for_each_frontswap_ops(ops) {
		ret = ops->load(type, offset, page);
		if (!ret) /* successful load */
			break;
	}
	if (ret == 0) {
		inc_frontswap_loads();
		if (frontswap_tmem_exclusive_gets_enabled) {
			SetPageDirty(page);
			__frontswap_clear(sis, offset);
		}
	}
	return ret;
}
EXPORT_SYMBOL(__frontswap_load);

/*
 * Invalidate any data from frontswap associated with the specified swaptype
 * and offset so that a subsequent "get" will fail.
 */
void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct swap_info_struct *sis = swap_info[type];
	struct frontswap_ops *ops;

	VM_BUG_ON(!frontswap_ops);
	VM_BUG_ON(sis == NULL);

	if (!__frontswap_test(sis, offset))
		return;

	for_each_frontswap_ops(ops)
		ops->invalidate_page(type, offset);
	__frontswap_clear(sis, offset);
	inc_frontswap_invalidates();
}
EXPORT_SYMBOL(__frontswap_invalidate_page);

/*
 * Invalidate all data from frontswap associated with all offsets for the
 * specified swaptype.
 */
void __frontswap_invalidate_area(unsigned type)
{
	struct swap_info_struct *sis = swap_info[type];
	struct frontswap_ops *ops;

	VM_BUG_ON(!frontswap_ops);
	VM_BUG_ON(sis == NULL);

	if (sis->frontswap_map == NULL)
		return;

	for_each_frontswap_ops(ops)
		ops->invalidate_area(type);
	atomic_set(&sis->frontswap_pages, 0);
	bitmap_zero(sis->frontswap_map, sis->max);
}
EXPORT_SYMBOL(__frontswap_invalidate_area);

static unsigned long __frontswap_curr_pages(void)
{
	unsigned long totalpages = 0;
	struct swap_info_struct *si = NULL;

	assert_spin_locked(&swap_lock);
	plist_for_each_entry(si, &swap_active_head, list)
		totalpages += atomic_read(&si->frontswap_pages);
	return totalpages;
}

static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
					int *swapid)
{
	int ret = -EINVAL;
	struct swap_info_struct *si = NULL;
	int si_frontswap_pages;
	unsigned long total_pages_to_unuse = total;
	unsigned long pages = 0, pages_to_unuse = 0;

	assert_spin_locked(&swap_lock);
	plist_for_each_entry(si, &swap_active_head, list) {
		si_frontswap_pages = atomic_read(&si->frontswap_pages);
		if (total_pages_to_unuse < si_frontswap_pages) {
			pages = pages_to_unuse = total_pages_to_unuse;
		} else {
			pages = si_frontswap_pages;
			pages_to_unuse = 0; /* unuse all */
		}
		/* ensure there is enough RAM to fetch pages from frontswap */
		if (security_vm_enough_memory_mm(current->mm, pages)) {
			ret = -ENOMEM;
			continue;
		}
		vm_unacct_memory(pages);
		*unused = pages_to_unuse;
		*swapid = si->type;
		ret = 0;
		break;
	}

	return ret;
}

/*
 * Used to check if it's necessary and feasible to unuse pages.
 * Return 1 when there is nothing to do, 0 when pages need to be shrunk,
 * or an error code when there is an error.
 */
static int __frontswap_shrink(unsigned long target_pages,
				unsigned long *pages_to_unuse,
				int *type)
{
	unsigned long total_pages = 0, total_pages_to_unuse;

	assert_spin_locked(&swap_lock);

	total_pages = __frontswap_curr_pages();
	if (total_pages <= target_pages) {
		/* Nothing to do */
		*pages_to_unuse = 0;
		return 1;
	}
	total_pages_to_unuse = total_pages - target_pages;
	return __frontswap_unuse_pages(total_pages_to_unuse, pages_to_unuse, type);
}

/*
 * Frontswap, like a true swap device, may unnecessarily retain pages
 * under certain circumstances; "shrink" frontswap is essentially a
 * "partial swapoff" and works by calling try_to_unuse to unuse enough
 * frontswap pages to -- subject to memory constraints -- reduce the
 * number of pages in frontswap to the number given in the parameter
 * target_pages.
 */
void frontswap_shrink(unsigned long target_pages)
{
	unsigned long pages_to_unuse = 0;
	int type, ret;

	/*
	 * we don't want to hold swap_lock while doing a very
	 * lengthy try_to_unuse, but swap_list may change
	 * so restart scan from swap_active_head each time
	 */
	spin_lock(&swap_lock);
	ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
	spin_unlock(&swap_lock);
	if (ret == 0)
		try_to_unuse(type, true, pages_to_unuse);
	return;
}
EXPORT_SYMBOL(frontswap_shrink);
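
/*
 * Example (hypothetical backend policy): under memory pressure, a
 * backend could halve frontswap's footprint with:
 *
 *	frontswap_shrink(frontswap_curr_pages() / 2);
 */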

/*
 * Count and return the number of frontswap pages across all
 * swap devices.  This is exported so that backend drivers can
 * determine current usage without reading debugfs.
 */
unsigned long frontswap_curr_pages(void)
{
	unsigned long totalpages = 0;

	spin_lock(&swap_lock);
	totalpages = __frontswap_curr_pages();
	spin_unlock(&swap_lock);

	return totalpages;
}
EXPORT_SYMBOL(frontswap_curr_pages);

static int __init init_frontswap(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *root = debugfs_create_dir("frontswap", NULL);
	if (root == NULL)
		return -ENXIO;
	debugfs_create_u64("loads", 0444, root, &frontswap_loads);
	debugfs_create_u64("succ_stores", 0444, root, &frontswap_succ_stores);
	debugfs_create_u64("failed_stores", 0444, root,
			   &frontswap_failed_stores);
	debugfs_create_u64("invalidates", 0444, root, &frontswap_invalidates);
#endif
	return 0;
}

module_init(init_frontswap);