// SPDX-License-Identifier: GPL-2.0
/*
 * Manage cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * We allocate the swap slots from the global pool and put
 * them into local per cpu caches. This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also the opportunity to simply return a slot
 * to the local caches without needing to acquire the
 * swap_info lock. We do not reuse the returned slots directly
 * but move them back to the global pool in a batch. This
 * allows the slots to coalesce and reduce fragmentation.
 *
 * The swap entry allocated is marked with the SWAP_HAS_CACHE
 * flag in its swap_map count, which prevents it from being
 * allocated again from the global pool.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock as when we search for slots with scan_swap_map,
 * we can possibly sleep.
 */

#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>

static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
static bool swap_slot_cache_active;
bool swap_slot_cache_enabled;
static bool swap_slot_cache_initialized;
static DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
static DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);

#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2

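/*
 * Mark the caches unusable and flush any cached slots back to the
 * global pool, so nothing stale can be handed out while the cache
 * is inactive.
 */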
static void deactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = false;
	__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
	mutex_unlock(&swap_slots_cache_mutex);
}

static void reactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = true;
	mutex_unlock(&swap_slots_cache_mutex);
}

/* Must not be called with cpu hot plug lock */
void disable_swap_slots_cache_lock(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	swap_slot_cache_enabled = false;
	if (swap_slot_cache_initialized) {
		/* serialize with cpu hotplug operations */
		cpus_read_lock();
		__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
		cpus_read_unlock();
	}
}

static void __reenable_swap_slots_cache(void)
{
	swap_slot_cache_enabled = has_usable_swap();
}

void reenable_swap_slots_cache_unlock(void)
{
	__reenable_swap_slots_cache();
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

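/*
 * Keep the cache active only while free swap is plentiful: reactivate
 * once free slots exceed THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE per online
 * cpu, deactivate once they fall below
 * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE per online cpu. The gap between
 * the two thresholds provides hysteresis so the cache does not flap
 * on and off around a single watermark.
 */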
static bool check_cache_active(void)
{
	long pages;

	if (!swap_slot_cache_enabled)
		return false;

	pages = get_nr_swap_pages();
	if (!swap_slot_cache_active) {
		if (pages > num_online_cpus() *
		    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
			reactivate_swap_slots_cache();
		goto out;
	}

	/* if global pool of slot caches too low, deactivate cache */
	if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
		deactivate_swap_slots_cache();
out:
	return swap_slot_cache_active;
}

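/*
 * CPU hotplug "online" callback, registered via cpuhp_setup_state() in
 * enable_swap_slots_cache(): allocate the per-cpu slot arrays for @cpu.
 */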
static int alloc_swap_slot_cache(unsigned int cpu)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots, *slots_ret;

	/*
	 * Do allocation outside swap_slots_cache_mutex
	 * as kvcalloc could trigger reclaim and get_swap_page,
	 * which can lock swap_slots_cache_mutex.
	 */
	slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			 GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			     GFP_KERNEL);
	if (!slots_ret) {
		kvfree(slots);
		return -ENOMEM;
	}

	mutex_lock(&swap_slots_cache_mutex);
	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret) {
		/* cache already allocated */
		mutex_unlock(&swap_slots_cache_mutex);

		kvfree(slots);
		kvfree(slots_ret);

		return 0;
	}

	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	/*
	 * We initialized alloc_lock and free_lock earlier.  We use
	 * !cache->slots or !cache->slots_ret to know if it is safe to acquire
	 * the corresponding lock and use the cache.  The memory barrier below
	 * ensures the assumption.
	 */
	mb();
	cache->slots = slots;
	cache->slots_ret = slots_ret;
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

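/*
 * Flush one cpu's caches back to the global pool: the allocation cache
 * when SLOTS_CACHE is set in @type, the return cache when SLOTS_CACHE_RET
 * is set. When @free_slots is true, also free the backing arrays (the
 * cpu hotplug teardown path).
 */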
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots = NULL;

	cache = &per_cpu(swp_slots, cpu);
	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			kvfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
		kvfree(slots);
	}
}

static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	/*
	 * This function is called during
	 *	1) swapoff, when we have to make sure no
	 *	   left over slots are in cache when we remove
	 *	   a swap device;
	 *	2) disabling of swap slot cache, when we run low
	 *	   on swap slots when allocating memory and need
	 *	   to return swap slots to global pool.
	 *
	 * We cannot acquire cpu hot plug lock here as
	 * this function can be invoked in the cpu
	 * hot plug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *   -> memory allocation -> direct reclaim -> get_swap_page
	 *   -> drain_swap_slots_cache
	 *
	 * Hence the loop over the currently online cpus below could miss
	 * a cpu that is being brought online but not yet marked as online.
	 * That is okay, as we do not schedule and run anything on a
	 * cpu before it has been marked online. Hence, we will not
	 * fill any swap slots in the slots cache of such a cpu.
	 * There are no slots on such a cpu that need to be drained.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}

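/* CPU hotplug teardown callback: drain and free both of @cpu's caches. */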
static int free_slot_cache(unsigned int cpu)
{
	mutex_lock(&swap_slots_cache_mutex);
	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

void enable_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	if (!swap_slot_cache_initialized) {
		int ret;

		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
					alloc_swap_slot_cache, free_slot_cache);
		if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating "
				       "without swap slots cache.\n", __func__))
			goto out_unlock;

		swap_slot_cache_initialized = true;
	}

	__reenable_swap_slots_cache();
out_unlock:
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
	if (!use_swap_slot_cache || cache->nr)
		return 0;

	cache->cur = 0;
	if (swap_slot_cache_active)
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
					   cache->slots, 1);

	return cache->nr;
}

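/*
 * Return a swap entry to this cpu's return cache. Once the cache holds
 * SWAP_SLOTS_CACHE_SIZE entries, the whole batch is flushed back to the
 * global pool. If the cache is unusable, free the entry directly.
 */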
int free_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;

	cache = raw_cpu_ptr(&swp_slots);
	if (likely(use_swap_slot_cache && cache->slots_ret)) {
		spin_lock_irq(&cache->free_lock);
		/* Swap slots cache may be deactivated before acquiring lock */
		if (!use_swap_slot_cache || !cache->slots_ret) {
			spin_unlock_irq(&cache->free_lock);
			goto direct_free;
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * Return slots to global pool.
			 * The current swap_map value is SWAP_HAS_CACHE.
			 * Set it to 0 to indicate it is available for
			 * allocation in global pool
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	} else {
direct_free:
		swapcache_free_entries(&entry, 1);
	}

	return 0;
}

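/*
 * Allocate one swap entry for @page. THP pages bypass the per-cpu cache
 * and ask the global pool for HPAGE_PMD_NR contiguous slots; order-0
 * pages are served from the per-cpu allocation cache when it is active,
 * falling back to the global pool otherwise. The entry is charged to the
 * page's memcg; on any failure a zero entry is returned.
 */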
swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	struct swap_slots_cache *cache;

	entry.val = 0;

	if (PageTransHuge(page)) {
		if (IS_ENABLED(CONFIG_THP_SWAP))
			get_swap_pages(1, &entry, HPAGE_PMD_NR);
		goto out;
	}

	/*
	 * Preemption is allowed here, because we may sleep
	 * in refill_swap_slots_cache().  But it is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	if (likely(check_cache_active() && cache->slots)) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				entry = cache->slots[cache->cur];
				cache->slots[cache->cur++].val = 0;
				cache->nr--;
			} else if (refill_swap_slots_cache(cache)) {
				goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			goto out;
	}

	get_swap_pages(1, &entry, 1);
out:
	if (mem_cgroup_try_charge_swap(page, entry)) {
		put_swap_page(page, entry);
		entry.val = 0;
	}
	return entry;
}