// SPDX-License-Identifier: GPL-2.0
/*
 * Manage the cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * We allocate swap slots from the global pool and put
 * them into local per-cpu caches.  This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also the opportunity to simply return a slot
 * to the local cache without needing to acquire the swap_info
 * lock.  We do not reuse the returned slots directly but
 * move them back to the global pool in a batch.  This
 * allows the slots to coalesce, reducing fragmentation.
 *
 * An allocated swap entry is marked with the SWAP_HAS_CACHE
 * flag in map_count, which prevents it from being allocated
 * again from the global pool.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock because we can possibly sleep when searching
 * for slots with scan_swap_map.
 */
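
/*
 * Rough sketch of the typical call flow, for orientation only:
 *
 *	entry = get_swap_page(page);	(allocate through the per-cpu cache)
 *	...
 *	free_swap_slot(entry);		(return through the per-cpu cache)
 *
 * Both paths fall back to the global pool when the slots cache is
 * inactive or unavailable.
 */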

#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>

static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
static bool swap_slot_cache_active;
bool swap_slot_cache_enabled;
static bool swap_slot_cache_initialized;
static DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
static DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);

#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2
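
/*
 * Drain both the allocation and the return caches on all CPUs back to
 * the global pool and mark the slots cache inactive.
 */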
static void deactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = false;
	__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
	mutex_unlock(&swap_slots_cache_mutex);
}
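
/*
 * Mark the slots cache usable again.  The per-cpu caches are refilled
 * lazily on the next allocation.
 */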
static void reactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = true;
	mutex_unlock(&swap_slots_cache_mutex);
}

/*
 * Must not be called with the cpu hotplug lock held: we take
 * cpus_read_lock() below.  Note that the enable mutex acquired here
 * is released later by reenable_swap_slots_cache_unlock().
 */
void disable_swap_slots_cache_lock(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	swap_slot_cache_enabled = false;
	if (swap_slot_cache_initialized) {
		/* serialize with cpu hotplug operations */
		cpus_read_lock();
		__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
		cpus_read_unlock();
	}
}

static void __reenable_swap_slots_cache(void)
{
	swap_slot_cache_enabled = has_usable_swap();
}

void reenable_swap_slots_cache_unlock(void)
{
	__reenable_swap_slots_cache();
	mutex_unlock(&swap_slots_cache_enable_mutex);
}
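
/*
 * Decide whether the slots cache should be active.  The activation
 * threshold (num_online_cpus() * THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE
 * free swap pages) is higher than the deactivation threshold, giving
 * hysteresis so the cache does not flap around a single boundary.
 */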
static bool check_cache_active(void)
{
	long pages;

	if (!swap_slot_cache_enabled)
		return false;

	pages = get_nr_swap_pages();
	if (!swap_slot_cache_active) {
		if (pages > num_online_cpus() *
		    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
			reactivate_swap_slots_cache();
		goto out;
	}

	/* if the global pool of free swap slots is too low, deactivate the cache */
	if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
		deactivate_swap_slots_cache();
out:
	return swap_slot_cache_active;
}
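
/*
 * CPU hotplug "online" callback: allocate the allocation and return
 * slot arrays for @cpu and publish them in its swap_slots_cache.
 */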
static int alloc_swap_slot_cache(unsigned int cpu)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots, *slots_ret;

	/*
	 * Do allocation outside swap_slots_cache_mutex
	 * as kvcalloc could trigger reclaim and get_swap_page,
	 * which can lock swap_slots_cache_mutex.
	 */
	slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			 GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			     GFP_KERNEL);
	if (!slots_ret) {
		kvfree(slots);
		return -ENOMEM;
	}

	mutex_lock(&swap_slots_cache_mutex);
	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret) {
		/* cache already allocated */
		mutex_unlock(&swap_slots_cache_mutex);

		kvfree(slots);
		kvfree(slots_ret);

		return 0;
	}

	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	/*
	 * We initialized alloc_lock and free_lock earlier.  Other CPUs
	 * test !cache->slots or !cache->slots_ret to know whether it is
	 * safe to acquire the corresponding lock and use the cache.
	 * The memory barrier below ensures the lock initialization is
	 * visible before the slots pointers are published.
	 */
	mb();
	cache->slots = slots;
	cache->slots_ret = slots_ret;
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}
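
/*
 * Hand the slots cached on @cpu back to the global pool.  @type selects
 * the allocation cache (SLOTS_CACHE), the return cache (SLOTS_CACHE_RET),
 * or both; with @free_slots, the slot arrays themselves are freed too.
 */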
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots = NULL;

	cache = &per_cpu(swp_slots, cpu);
	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			kvfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
		kvfree(slots);
	}
}

static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	/*
	 * This function is called during
	 * 1) swapoff, when we have to make sure no
	 *    left over slots are in cache when we remove
	 *    a swap device;
	 * 2) disabling of the swap slot cache, when we run low
	 *    on swap slots when allocating memory and need
	 *    to return swap slots to the global pool.
	 *
	 * We cannot acquire the cpu hotplug lock here as
	 * this function can be invoked in the cpu
	 * hotplug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *	     -> memory allocation -> direct reclaim -> get_swap_page
	 *	     -> drain_swap_slots_cache
	 *
	 * Hence the loop over the currently online cpus below could miss
	 * a cpu that is being brought online but not yet marked online.
	 * That is okay, as we do not schedule and run anything on a
	 * cpu before it has been marked online.  Hence, we will not
	 * fill any swap slots in the slots cache of such a cpu.
	 * There are no slots on such a cpu that need to be drained.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}
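
/*
 * CPU hotplug teardown callback: drain the per-cpu caches of @cpu and
 * free its slot arrays.
 */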
static int free_slot_cache(unsigned int cpu)
{
	mutex_lock(&swap_slots_cache_mutex);
	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}
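
/*
 * On first call, register the CPU hotplug callbacks that allocate and
 * free the per-cpu caches; then enable the slots cache whenever there
 * is usable swap.
 */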
void enable_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	if (!swap_slot_cache_initialized) {
		int ret;

		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
					alloc_swap_slot_cache, free_slot_cache);
		if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating without swap slots cache.\n",
			      __func__))
			goto out_unlock;

		swap_slot_cache_initialized = true;
	}

	__reenable_swap_slots_cache();
out_unlock:
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
	if (!use_swap_slot_cache || cache->nr)
		return 0;

	cache->cur = 0;
	if (swap_slot_cache_active)
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
					   cache->slots, 1);

	return cache->nr;
}
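
/*
 * Return a swap entry through the per-cpu return cache.  Entries are
 * batched and handed back to the global pool once SWAP_SLOTS_CACHE_SIZE
 * of them accumulate; if the cache is unavailable, the entry is freed
 * directly.
 */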
int free_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;

	cache = raw_cpu_ptr(&swp_slots);
	if (likely(use_swap_slot_cache && cache->slots_ret)) {
		spin_lock_irq(&cache->free_lock);
		/* Swap slots cache may be deactivated before acquiring lock */
		if (!use_swap_slot_cache || !cache->slots_ret) {
			spin_unlock_irq(&cache->free_lock);
			goto direct_free;
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * Return slots to the global pool.
			 * The current swap_map value is SWAP_HAS_CACHE.
			 * Set it to 0 to indicate it is available for
			 * allocation in the global pool.
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	} else {
direct_free:
		swapcache_free_entries(&entry, 1);
	}

	return 0;
}
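
/*
 * Allocate a swap entry for @page.  THP pages bypass the cache and go
 * straight to the global pool; order-0 pages are served from the
 * per-cpu cache when it is active, with the global pool as fallback.
 * The entry is charged to the page's memcg before being returned.
 */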
swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	struct swap_slots_cache *cache;

	entry.val = 0;

	if (PageTransHuge(page)) {
		if (IS_ENABLED(CONFIG_THP_SWAP))
			get_swap_pages(1, &entry, HPAGE_PMD_NR);
		goto out;
	}

	/*
	 * Preemption is allowed here, because we may sleep
	 * in refill_swap_slots_cache().  But it is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	if (likely(check_cache_active() && cache->slots)) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				entry = cache->slots[cache->cur];
				cache->slots[cache->cur++].val = 0;
				cache->nr--;
			} else if (refill_swap_slots_cache(cache)) {
				goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			goto out;
	}

	get_swap_pages(1, &entry, 1);
out:
	if (mem_cgroup_try_charge_swap(page, entry)) {
		put_swap_page(page, entry);
		entry.val = 0;
	}
	return entry;
}