// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool. This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/zpool.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate. However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
*/

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

/* Enable/disable zswap (disabled by default) */
static bool zswap_enabled;
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
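
/*
 * Usage sketch (not part of the original source): with the 0644 mode above,
 * zswap can be toggled at runtime through sysfs, and a boot-time default
 * can be given on the kernel command line, e.g.:
 *
 *	echo 1 > /sys/module/zswap/parameters/enabled	(runtime)
 *	zswap.enabled=1					(boot parameter)
 */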

/* Crypto compressor to use */
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
#define ZSWAP_ZPOOL_DEFAULT "zbud"
static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
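
/*
 * Usage sketch (not part of the original source): both parameters can be
 * switched at runtime, assuming the named algorithm/allocator is available
 * in the kernel, e.g.:
 *
 *	echo lz4 > /sys/module/zswap/parameters/compressor
 *	echo z3fold > /sys/module/zswap/parameters/zpool
 *
 * New stores then go to a pool with the new setting; see
 * __zswap_param_set() below for how the old pool is kept until it drains.
 */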

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* Enable/disable handling same-value filled pages (enabled by default) */
static bool zswap_same_filled_pages_enabled = true;
module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
		   bool, 0644);

/*********************************
* data structures
**********************************/

struct zswap_pool {
	struct zpool *zpool;
	struct crypto_comp * __percpu *tfm;
	struct kref kref;
	struct list_head list;
	struct work_struct work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * offset - the swap offset for the entry.  Index into the red-black tree.
 * refcount - the number of outstanding references to the entry. This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression.  For a same-value filled page, length is 0.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - value of the same-value filled pages which have same content
 */
struct zswap_entry {
	struct rb_node rbnode;
	pgoff_t offset;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
};
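
/*
 * Illustrative note (not part of the original source): the union works
 * because the two cases are disjoint.  A same-value filled page is stored
 * as (length == 0, value == the repeating word) with no zpool allocation;
 * a compressed page is stored as (length > 0, handle -> data in
 * pool->zpool).  zswap_free_entry() below keys off length to decide which
 * member is live.
 */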

struct zswap_header {
	swp_entry_t swpentry;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

/* used by param callback function */
static bool zswap_init_started;

/* fatal error during init */
static bool zswap_init_failed;

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static const struct zpool_ops zswap_zpool_ops = {
	.evict = zswap_writeback_entry
};

static bool zswap_is_full(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100 <
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
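
/*
 * Worked example (hypothetical numbers, not part of the original source):
 * with 4 KiB pages, 1048576 total pages (4 GiB of RAM) and the default
 * zswap_max_pool_percent of 20, the pool limit is 1048576 * 20 / 100 =
 * 209715 pages, so zswap_is_full() returns true once the compressed pool
 * exceeds roughly 819 MiB.
 */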

static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_size(pool->zpool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
	kmem_cache_destroy(zswap_entry_cache);
}

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		if (entry->offset > offset)
			node = node->rb_left;
		else if (entry->offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		if (myentry->offset > entry->offset)
			link = &(*link)->rb_left;
		else if (myentry->offset < entry->offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
	}
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zpool_free(entry->pool->zpool, entry->handle);
		zswap_pool_put(entry->pool);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/* caller must hold the tree lock
 * remove from the tree and free it, if nobody references the entry
 */
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	BUG_ON(refcount < 0);
	if (refcount == 0) {
		zswap_rb_erase(&tree->rbroot, entry);
		zswap_free_entry(entry);
	}
}
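
/*
 * Refcount lifecycle in brief (summary, not part of the original source):
 * an entry starts at refcount 1, held by the rbtree.  Load and writeback
 * take a temporary reference through zswap_entry_find_get() under the tree
 * lock and drop it with zswap_entry_put() when done; invalidate drops the
 * initial tree reference.  Whoever drops the count to zero frees the entry.
 */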

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}

/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);

static int zswap_dstmem_prepare(unsigned int cpu)
{
	u8 *dst;

	dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!dst)
		return -ENOMEM;

	per_cpu(zswap_dstmem, cpu) = dst;
	return 0;
}

static int zswap_dstmem_dead(unsigned int cpu)
{
	u8 *dst;

	dst = per_cpu(zswap_dstmem, cpu);
	kfree(dst);
	per_cpu(zswap_dstmem, cpu) = NULL;

	return 0;
}

static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_comp *tfm;

	if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
		return 0;

	tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
	if (IS_ERR_OR_NULL(tfm)) {
		pr_err("could not alloc crypto comp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(tfm));
		return -ENOMEM;
	}
	*per_cpu_ptr(pool->tfm, cpu) = tfm;
	return 0;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_comp *tfm;

	tfm = *per_cpu_ptr(pool->tfm, cpu);
	if (!IS_ERR_OR_NULL(tfm))
		crypto_free_comp(tfm);
	*per_cpu_ptr(pool->tfm, cpu) = NULL;
	return 0;
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	WARN_ONCE(!last && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);
	if (!zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either is unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));

	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
	pool->tfm = alloc_percpu(struct crypto_comp *);
	if (!pool->tfm) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;
	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

error:
	free_percpu(pool->tfm);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static __init struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_comp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_comp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->tfm);
	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool), work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->work, __zswap_pool_release);
	schedule_work(&pool->work);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret;

	if (zswap_init_failed) {
		pr_err("can't set param, initialization failed\n");
		return -ENODEV;
	}

	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return 0;

	/* if this is load-time (pre-init) param setting,
	 * don't create a pool; that's done during init.
	 */
	if (!zswap_init_started)
		return param_set_charp(s, kp);

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_comp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_comp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}
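
/*
 * Flow sketch (summary, not part of the original source): writing e.g. a
 * new compressor finds or creates a pool for (zpool, new compressor), makes
 * it the current pool at the head of zswap_pools, and puts the old current
 * pool.  The old pool survives on the list until its last entry is loaded,
 * invalidated, or written back, so existing compressed pages stay readable.
 */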

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	if (zswap_init_failed) {
		pr_err("can't enable, initialization failed\n");
		return -ENODEV;
	}
	if (!zswap_has_pool && zswap_init_started) {
		pr_err("can't enable, no pool configured\n");
		return -ENODEV;
	}

	return param_set_bool(val, kp);
}

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};

/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	bool page_was_allocated;

	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
			NULL, 0, &page_was_allocated);
	if (page_was_allocated)
		return ZSWAP_SWAPCACHE_NEW;
	if (!*retpage)
		return ZSWAP_SWAPCACHE_FAIL;
	return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	zpool_unmap_handle(pool, handle);
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		put_page(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
				ZPOOL_MM_RO) + sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		tfm = *get_cpu_ptr(entry->pool->tfm);
		ret = crypto_comp_decompress(tfm, src, entry->length,
					     dst, &dlen);
		put_cpu_ptr(entry->pool->tfm);
		kunmap_atomic(dst);
		zpool_unmap_handle(entry->pool->zpool, entry->handle);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	put_page(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	 * There are two possible situations for entry here:
	 * (1) refcount is 1 (normal case), entry is valid and on the tree
	 * (2) refcount is 0, entry is freed and not on the tree
	 *     because invalidate happened during writeback
	 * search the tree and free the entry if we find it
	 */
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	 * if we get here due to ZSWAP_SWAPCACHE_EXIST, a load may be
	 * happening concurrently; it is safe and okay to not free the
	 * entry, and it is also okay to return !0 if we free the entry
	 * in the following put.
	 */
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	return ret;
}

static int zswap_shrink(void)
{
	struct zswap_pool *pool;
	int ret;

	pool = zswap_pool_last_get();
	if (!pool)
		return -ENOENT;

	ret = zpool_shrink(pool->zpool, 1, NULL);

	zswap_pool_put(pool);

	return ret;
}

static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;
	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos] != page[0])
			return 0;
	}
	*value = page[0];
	return 1;
}

static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}
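
/*
 * Example (illustrative, not part of the original source): an all-zero page
 * yields value == 0; a page filled with the byte 0xaa yields value ==
 * 0xaaaaaaaaaaaaaaaa on a 64-bit machine.  Such pages are stored as
 * (length == 0, value) entries with no zpool allocation, and
 * zswap_fill_page() reconstructs them on load by replaying the word with
 * memset_l().
 */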

/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct crypto_comp *tfm;
	int ret;
	unsigned int hlen, dlen = PAGE_SIZE;
	unsigned long handle, value;
	char *buf;
	u8 *src, *dst;
	struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
	gfp_t gfp;

	/* THP isn't supported */
	if (PageTransHuge(page)) {
		ret = -EINVAL;
		goto reject;
	}

	if (!zswap_enabled || !tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zswap_shrink()) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}

		/* A second zswap_is_full() check after
		 * zswap_shrink() to make sure it's now
		 * under the max_pool_percent
		 */
		if (zswap_is_full()) {
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	if (zswap_same_filled_pages_enabled) {
		src = kmap_atomic(page);
		if (zswap_is_page_same_filled(src, &value)) {
			kunmap_atomic(src);
			entry->offset = offset;
			entry->length = 0;
			entry->value = value;
			atomic_inc(&zswap_same_filled_pages);
			goto insert_entry;
		}
		kunmap_atomic(src);
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool) {
		ret = -EINVAL;
		goto freepage;
	}

	/* compress */
	dst = get_cpu_var(zswap_dstmem);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	src = kmap_atomic(page);
	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	put_cpu_ptr(entry->pool->tfm);
	if (ret) {
		ret = -EINVAL;
		goto put_dstmem;
	}

	/* store */
	hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(entry->pool->zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto put_dstmem;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto put_dstmem;
	}
	buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
	memcpy(buf, &zhdr, hlen);
	memcpy(buf + hlen, dst, dlen);
	zpool_unmap_handle(entry->pool->zpool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

insert_entry:
	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();

	return 0;

put_dstmem:
	put_cpu_var(zswap_dstmem);
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	return ret;
}
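
/*
 * Store path in brief (summary, not part of the original source): the page
 * is first checked for same-value fill (stored without compression on a
 * hit); otherwise it is compressed into the per-cpu dstmem buffer, copied
 * into a fresh zpool allocation behind an optional zswap_header, and the
 * entry is inserted into the per-swap-type rbtree, evicting any duplicate
 * entry at the same offset.
 */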

/*
 * returns 0 if the page was successfully decompressed
 * returns -1 on entry not found or error
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	if (!entry->length) {
		dst = kmap_atomic(page);
		zswap_fill_page(dst, entry->value);
		kunmap_atomic(dst);
		goto freeentry;
	}

	/* decompress */
	dlen = PAGE_SIZE;
	src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
	if (zpool_evictable(entry->pool->zpool))
		src += sizeof(struct zswap_header);
	dst = kmap_atomic(page);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
	put_cpu_ptr(entry->pool->tfm);
	kunmap_atomic(dst);
	zpool_unmap_handle(entry->pool->zpool, entry->handle);
	BUG_ON(ret);

freeentry:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return 0;
}

/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}

	/* remove from rbtree */
	zswap_rb_erase(&tree->rbroot, entry);

	/* drop the initial reference from entry creation */
	zswap_entry_put(tree, entry);

	spin_unlock(&tree->lock);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}

static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}

static struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", 0444,
			   zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", 0444,
			   zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
	debugfs_create_atomic_t("same_filled_pages", 0444,
				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}

static void __exit zswap_debugfs_exit(void)
{
	debugfs_remove_recursive(zswap_debugfs_root);
}
#else
static int __init zswap_debugfs_init(void)
{
	return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif
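
/*
 * Usage sketch (assumes CONFIG_DEBUG_FS and a mounted debugfs, not part of
 * the original source): the counters registered above can be read from
 * userspace, e.g.:
 *
 *	grep -H . /sys/kernel/debug/zswap/*
 *
 * which dumps pool_total_size, stored_pages, the reject counters, etc.
 */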

/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_init_started = true;

	if (zswap_entry_cache_create()) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
				zswap_dstmem_prepare, zswap_dstmem_dead);
	if (ret) {
		pr_err("dstmem alloc failed\n");
		goto dstmem_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	frontswap_register_ops(&zswap_frontswap_ops);
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	return 0;

hp_fail:
	cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail:
	zswap_entry_cache_destroy();
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_failed = true;
	zswap_enabled = false;
	return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");