/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline void deprecated_attr_warn(const char *name)
{
        pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
                        task_pid_nr(current),
                        current->comm,
                        name,
                        "See zram documentation.");
}

#define ZRAM_ATTR_RO(name)                                              \
static ssize_t name##_show(struct device *d,                            \
                                struct device_attribute *attr, char *b) \
{                                                                       \
        struct zram *zram = dev_to_zram(d);                             \
                                                                        \
        deprecated_attr_warn(__stringify(name));                        \
        return scnprintf(b, PAGE_SIZE, "%llu\n",                        \
                (u64)atomic64_read(&zram->stats.name));                 \
}                                                                       \
static DEVICE_ATTR_RO(name);
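
/*
 * For example, ZRAM_ATTR_RO(num_reads) expands into a num_reads_show()
 * handler that prints zram->stats.num_reads, plus a matching
 * dev_attr_num_reads for use in zram_disk_attrs[] further down.
 */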

static inline bool init_done(struct zram *zram)
{
        return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
        return (struct zram *)dev_to_disk(dev)->private_data;
}

/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value &= ~BIT(flag);
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
        return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
                                        u32 index, size_t size)
{
        unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

        meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
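
/*
 * The accessors above pack two things into table[index].value: the
 * compressed object size lives in the low ZRAM_FLAG_SHIFT bits, and
 * the zram_pageflags (ZRAM_ZERO, ZRAM_ACCESS, ...) sit above them:
 *
 *      value = (flags << ZRAM_FLAG_SHIFT) | size;
 *
 * which is why BIT(flag) can be used for the flag tests above.
 */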

static inline bool is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
                sector_t start, unsigned int size)
{
        u64 end, bound;

        /* unaligned request */
        if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return false;
        if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return false;

        end = start + (size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
                return false;

        /* I/O request is valid */
        return true;
}
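
/*
 * Concretely, with the usual ZRAM_LOGICAL_BLOCK_SIZE of 4096 bytes
 * (eight 512-byte sectors), valid_io_request() only accepts requests
 * whose start sector is a multiple of 8 and whose size is a multiple
 * of 4096, and which end at or before the configured disksize.
 */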

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
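
/*
 * For example, with 4K pages, a partial vector of bv_len 2048 at
 * offset 3072 crosses into the next page: 3072 + 2048 >= 4096, so the
 * index advances and the new offset becomes 5120 % 4096 = 1024.
 */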

static inline void update_used_max(struct zram *zram,
                                        const unsigned long pages)
{
        unsigned long old_max, cur_max;

        old_max = atomic_long_read(&zram->stats.max_used_pages);

        do {
                cur_max = old_max;
                if (pages > cur_max)
                        old_max = atomic_long_cmpxchg(
                                &zram->stats.max_used_pages, cur_max, pages);
        } while (old_max != cur_max);
}
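
/*
 * The loop above is the usual lock-free maximum-update pattern: retry
 * the cmpxchg until either max_used_pages is already >= pages or our
 * value was installed, so concurrent writers never lose a peak.
 */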

static bool page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return false;
        }

        return true;
}

static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec))
                memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        else
                clear_page(user_mem);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}

static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u32 val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = init_done(zram);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t orig_data_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("orig_data_size");
        return scnprintf(buf, PAGE_SIZE, "%llu\n",
                (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("mem_used_total");
        down_read(&zram->init_lock);
        if (init_done(zram)) {
                struct zram_meta *meta = zram->meta;
                val = zs_get_total_pages(meta->mem_pool);
        }
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val;
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("mem_limit");
        down_read(&zram->init_lock);
        val = zram->limit_pages;
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 limit;
        char *tmp;
        struct zram *zram = dev_to_zram(dev);

        limit = memparse(buf, &tmp);
        if (buf == tmp) /* no chars parsed, invalid input */
                return -EINVAL;

        down_write(&zram->init_lock);
        zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
        up_write(&zram->init_lock);

        return len;
}
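
/*
 * mem_limit is parsed with memparse(), so the usual size suffixes
 * work from user space, e.g. (per the zram documentation):
 *
 *      echo 512M > /sys/block/zram0/mem_limit
 *
 * Writing 0 disables the limit again.
 */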

static ssize_t mem_used_max_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("mem_used_max");
        down_read(&zram->init_lock);
        if (init_done(zram))
                val = atomic_long_read(&zram->stats.max_used_pages);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int err;
        unsigned long val;
        struct zram *zram = dev_to_zram(dev);

        err = kstrtoul(buf, 10, &val);
        if (err || val != 0)
                return -EINVAL;

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                struct zram_meta *meta = zram->meta;
                atomic_long_set(&zram->stats.max_used_pages,
                                zs_get_total_pages(meta->mem_pool));
        }
        up_read(&zram->init_lock);

        return len;
}
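
/*
 * Only 0 is accepted on write; it resets the recorded peak to the
 * current pool usage, e.g.:
 *
 *      echo 0 > /sys/block/zram0/mem_used_max
 */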
306
Sergey Senozhatsky43209ea2016-05-20 16:59:59 -0700307/*
308 * We switched to per-cpu streams and this attr is not needed anymore.
309 * However, we will keep it around for some time, because:
310 * a) we may revert per-cpu streams in the future
311 * b) it's visible to user space and we need to follow our 2 years
312 * retirement rule; but we already have a number of 'soon to be
313 * altered' attrs, so max_comp_streams need to wait for the next
314 * layoff cycle.
315 */
Sergey Senozhatsky522698d2015-06-25 15:00:08 -0700316static ssize_t max_comp_streams_show(struct device *dev,
317 struct device_attribute *attr, char *buf)
318{
Sergey Senozhatsky43209ea2016-05-20 16:59:59 -0700319 return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
Sergey Senozhatsky522698d2015-06-25 15:00:08 -0700320}

static ssize_t max_comp_streams_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        return len;
}

static ssize_t comp_algorithm_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        size_t sz;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        sz = zcomp_available_show(zram->compressor, buf);
        up_read(&zram->init_lock);

        return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);
        char compressor[CRYPTO_MAX_ALG_NAME];
        size_t sz;

        strlcpy(compressor, buf, sizeof(compressor));
        /* ignore trailing newline */
        sz = strlen(compressor);
        if (sz > 0 && compressor[sz - 1] == '\n')
                compressor[sz - 1] = 0x00;

        if (!zcomp_available_algorithm(compressor))
                return -EINVAL;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                up_write(&zram->init_lock);
                pr_info("Can't change algorithm for initialized device\n");
                return -EBUSY;
        }

        strlcpy(zram->compressor, compressor, sizeof(compressor));
        up_write(&zram->init_lock);
        return len;
}
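
/*
 * The algorithm can only be changed before the device is sized, e.g.
 * (per the zram documentation):
 *
 *      cat /sys/block/zram0/comp_algorithm    # lists, [..] marks current
 *      echo lz4 > /sys/block/zram0/comp_algorithm
 */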

static ssize_t compact_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);
        struct zram_meta *meta;

        down_read(&zram->init_lock);
        if (!init_done(zram)) {
                up_read(&zram->init_lock);
                return -EINVAL;
        }

        meta = zram->meta;
        zs_compact(meta->mem_pool);
        up_read(&zram->init_lock);

        return len;
}
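
/*
 * Any write triggers zsmalloc pool compaction; by convention (see the
 * zram documentation) user space does:
 *
 *      echo 1 > /sys/block/zram0/compact
 */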

static ssize_t io_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret;

        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8llu\n",
                        (u64)atomic64_read(&zram->stats.failed_reads),
                        (u64)atomic64_read(&zram->stats.failed_writes),
                        (u64)atomic64_read(&zram->stats.invalid_io),
                        (u64)atomic64_read(&zram->stats.notify_free));
        up_read(&zram->init_lock);

        return ret;
}

static ssize_t mm_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        struct zs_pool_stats pool_stats;
        u64 orig_size, mem_used = 0;
        long max_used;
        ssize_t ret;

        memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                mem_used = zs_get_total_pages(zram->meta->mem_pool);
                zs_pool_stats(zram->meta->mem_pool, &pool_stats);
        }

        orig_size = atomic64_read(&zram->stats.pages_stored);
        max_used = atomic_long_read(&zram->stats.max_used_pages);

        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
                        orig_size << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.compr_data_size),
                        mem_used << PAGE_SHIFT,
                        zram->limit_pages << PAGE_SHIFT,
                        max_used << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.zero_pages),
                        pool_stats.pages_compacted);
        up_read(&zram->init_lock);

        return ret;
}
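
/*
 * mm_stat columns, left to right: orig_data_size, compr_data_size,
 * mem_used_total, mem_limit and mem_used_max (all in bytes), then the
 * zero_pages and pages_compacted counts.
 */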

static ssize_t debug_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int version = 1;
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret;

        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
                        "version: %d\n%8llu\n",
                        version,
                        (u64)atomic64_read(&zram->stats.writestall));
        up_read(&zram->init_lock);

        return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static inline bool zram_meta_get(struct zram *zram)
{
        if (atomic_inc_not_zero(&zram->refcount))
                return true;
        return false;
}

static inline void zram_meta_put(struct zram *zram)
{
        atomic_dec(&zram->refcount);
}

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
        size_t num_pages = disksize >> PAGE_SHIFT;
        size_t index;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < num_pages; index++) {
                unsigned long handle = meta->table[index].handle;

                if (!handle)
                        continue;

                zs_free(meta->mem_pool, handle);
        }

        zs_destroy_pool(meta->mem_pool);
        vfree(meta->table);
        kfree(meta);
}

static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
{
        size_t num_pages;
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

        if (!meta)
                return NULL;

        num_pages = disksize >> PAGE_SHIFT;
        meta->table = vzalloc(num_pages * sizeof(*meta->table));
        if (!meta->table) {
                pr_err("Error allocating zram address table\n");
                goto out_error;
        }

        meta->mem_pool = zs_create_pool(pool_name);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
                goto out_error;
        }

        return meta;

out_error:
        vfree(meta->table);
        kfree(meta);
        return NULL;
}

/*
 * To protect concurrent access to the same index entry, the caller
 * should hold that table entry's bit_spinlock to indicate the entry
 * is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(meta, index, ZRAM_ZERO)) {
                        zram_clear_flag(meta, index, ZRAM_ZERO);
                        atomic64_dec(&zram->stats.zero_pages);
                }
                return;
        }

        zs_free(meta->mem_pool, handle);

        atomic64_sub(zram_get_obj_size(meta, index),
                        &zram->stats.compr_data_size);
        atomic64_dec(&zram->stats.pages_stored);

        meta->table[index].handle = 0;
        zram_set_obj_size(meta, index, 0);
}

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
        int ret = 0;
        unsigned char *cmem;
        struct zram_meta *meta = zram->meta;
        unsigned long handle;
        unsigned int size;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        handle = meta->table[index].handle;
        size = zram_get_obj_size(meta, index);

        if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                clear_page(mem);
                return 0;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE) {
                copy_page(mem, cmem);
        } else {
                struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

                ret = zcomp_decompress(zstrm, cmem, size, mem);
                zcomp_stream_put(zram->comp);
        }
        zs_unmap_object(meta->mem_pool, handle);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                return ret;
        }

        return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset)
{
        int ret;
        struct page *page;
        unsigned char *user_mem, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        page = bvec->bv_page;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_ZERO)) {
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                handle_zero_page(bvec);
                return 0;
        }
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        if (is_partial_io(bvec))
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;

        if (!uncmem) {
                pr_err("Unable to allocate temp memory\n");
                ret = -ENOMEM;
                goto out_cleanup;
        }

        ret = zram_decompress_page(zram, uncmem, index);
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret))
                goto out_cleanup;

        if (is_partial_io(bvec))
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                                bvec->bv_len);

        flush_dcache_page(page);
        ret = 0;
out_cleanup:
        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
{
        int ret = 0;
        unsigned int clen;
        unsigned long handle = 0;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        struct zcomp_strm *zstrm = NULL;
        unsigned long alloced_pages;

        page = bvec->bv_page;
        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
                if (!uncmem) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
                if (ret)
                        goto out;
        }

compress_again:
        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                                bvec->bv_len);
                kunmap_atomic(user_mem);
                user_mem = NULL;
        } else {
                uncmem = user_mem;
        }

        if (page_zero_filled(uncmem)) {
                if (user_mem)
                        kunmap_atomic(user_mem);
                /* Free memory associated with this sector now. */
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
                zram_set_flag(meta, index, ZRAM_ZERO);
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

                atomic64_inc(&zram->stats.zero_pages);
                ret = 0;
                goto out;
        }

        zstrm = zcomp_stream_get(zram->comp);
        ret = zcomp_compress(zstrm, uncmem, &clen);
        if (!is_partial_io(bvec)) {
                kunmap_atomic(user_mem);
                user_mem = NULL;
                uncmem = NULL;
        }

        if (unlikely(ret)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }

        src = zstrm->buffer;
        if (unlikely(clen > max_zpage_size)) {
                clen = PAGE_SIZE;
                if (is_partial_io(bvec))
                        src = uncmem;
        }

        /*
         * handle allocation has 2 paths:
         * a) fast path is executed with preemption disabled (for
         *    per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
         *    since we can't sleep;
         * b) slow path enables preemption and attempts to allocate
         *    the page with __GFP_DIRECT_RECLAIM bit set. we have to
         *    put per-cpu compression stream and, thus, to re-do
         *    the compression once handle is allocated.
         *
         * if we have a 'non-null' handle here then we are coming
         * from the slow path and handle has already been allocated.
         */
        if (!handle)
                handle = zs_malloc(meta->mem_pool, clen,
                                __GFP_KSWAPD_RECLAIM |
                                __GFP_NOWARN |
                                __GFP_HIGHMEM |
                                __GFP_MOVABLE);
        if (!handle) {
                zcomp_stream_put(zram->comp);
                zstrm = NULL;

                atomic64_inc(&zram->stats.writestall);

                handle = zs_malloc(meta->mem_pool, clen,
                                GFP_NOIO | __GFP_HIGHMEM |
                                __GFP_MOVABLE);
                if (handle)
                        goto compress_again;

                pr_err("Error allocating memory for compressed page: %u, size=%u\n",
                        index, clen);
                ret = -ENOMEM;
                goto out;
        }

        alloced_pages = zs_get_total_pages(meta->mem_pool);
        update_used_max(zram, alloced_pages);

        if (zram->limit_pages && alloced_pages > zram->limit_pages) {
                zs_free(meta->mem_pool, handle);
                ret = -ENOMEM;
                goto out;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
                copy_page(cmem, src);
                kunmap_atomic(src);
        } else {
                memcpy(cmem, src, clen);
        }

        zcomp_stream_put(zram->comp);
        zstrm = NULL;
        zs_unmap_object(meta->mem_pool, handle);

        /*
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);

        meta->table[index].handle = handle;
        zram_set_obj_size(meta, index, clen);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        /* Update stats */
        atomic64_add(clen, &zram->stats.compr_data_size);
        atomic64_inc(&zram->stats.pages_stored);
out:
        if (zstrm)
                zcomp_stream_put(zram->comp);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
                             int offset, struct bio *bio)
{
        size_t n = bio->bi_iter.bi_size;
        struct zram_meta *meta = zram->meta;

        /*
         * zram manages data in physical block size units. Because logical
         * block size isn't identical to physical block size on some arch, we
         * could get a discard request pointing to a specific offset within a
         * certain physical block. Although we can handle this request by
         * reading that physical block and decompressing and partially zeroing
         * and re-compressing and then re-storing it, this isn't reasonable
         * because our intent with a discard request is to save memory. So
         * skipping this logical block is appropriate here.
         */
        if (offset) {
                if (n <= (PAGE_SIZE - offset))
                        return;

                n -= (PAGE_SIZE - offset);
                index++;
        }

        while (n >= PAGE_SIZE) {
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                atomic64_inc(&zram->stats.notify_free);
                index++;
                n -= PAGE_SIZE;
        }
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, int rw)
{
        unsigned long start_time = jiffies;
        int ret;

        generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
                        &zram->disk->part0);

        if (rw == READ) {
                atomic64_inc(&zram->stats.num_reads);
                ret = zram_bvec_read(zram, bvec, index, offset);
        } else {
                atomic64_inc(&zram->stats.num_writes);
                ret = zram_bvec_write(zram, bvec, index, offset);
        }

        generic_end_io_acct(rw, &zram->disk->part0, start_time);

        if (unlikely(ret)) {
                if (rw == READ)
                        atomic64_inc(&zram->stats.failed_reads);
                else
                        atomic64_inc(&zram->stats.failed_writes);
        }

        return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
        int offset, rw;
        u32 index;
        struct bio_vec bvec;
        struct bvec_iter iter;

        index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_iter.bi_sector &
                  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
                zram_bio_discard(zram, index, offset, bio);
                bio_endio(bio);
                return;
        }

        rw = bio_data_dir(bio);
        bio_for_each_segment(bvec, bio, iter) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec.bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only operate on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec.bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec.bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
                                goto out;

                        bv.bv_len = bvec.bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
                                goto out;
                } else
                        if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
                                goto out;

                update_position(&index, &offset, &bvec);
        }

        bio_endio(bio);
        return;

out:
        bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        if (unlikely(!zram_meta_get(zram)))
                goto error;

        blk_queue_split(queue, &bio, queue->bio_split);

        if (!valid_io_request(zram, bio->bi_iter.bi_sector,
                                        bio->bi_iter.bi_size)) {
                atomic64_inc(&zram->stats.invalid_io);
                goto put_zram;
        }

        __zram_make_request(zram, bio);
        zram_meta_put(zram);
        return BLK_QC_T_NONE;
put_zram:
        zram_meta_put(zram);
error:
        bio_io_error(bio);
        return BLK_QC_T_NONE;
}

static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;
        struct zram_meta *meta;

        zram = bdev->bd_disk->private_data;
        meta = zram->meta;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
        atomic64_inc(&zram->stats.notify_free);
}

static int zram_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, int rw)
{
        int offset, err = -EIO;
        u32 index;
        struct zram *zram;
        struct bio_vec bv;

        zram = bdev->bd_disk->private_data;
        if (unlikely(!zram_meta_get(zram)))
                goto out;

        if (!valid_io_request(zram, sector, PAGE_SIZE)) {
                atomic64_inc(&zram->stats.invalid_io);
                err = -EINVAL;
                goto put_zram;
        }

        index = sector >> SECTORS_PER_PAGE_SHIFT;
        offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;

        bv.bv_page = page;
        bv.bv_len = PAGE_SIZE;
        bv.bv_offset = 0;

        err = zram_bvec_rw(zram, &bv, index, offset, rw);
put_zram:
        zram_meta_put(zram);
out:
        /*
         * If I/O fails, just return error (ie, non-zero) without
         * calling page_endio.
         * The callers of rw_page (e.g., swap_readpage, __swap_writepage)
         * will then resubmit the I/O as a bio request, and
         * bio->bi_end_io handles the error (e.g., SetPageError,
         * set_page_dirty and extra work).
         */
        if (err == 0)
                page_endio(page, rw, 0);
        return err;
}

static void zram_reset_device(struct zram *zram)
{
        struct zram_meta *meta;
        struct zcomp *comp;
        u64 disksize;

        down_write(&zram->init_lock);

        zram->limit_pages = 0;

        if (!init_done(zram)) {
                up_write(&zram->init_lock);
                return;
        }

        meta = zram->meta;
        comp = zram->comp;
        disksize = zram->disksize;
        /*
         * Refcount will go down to 0 eventually and the r/w handler
         * cannot handle further I/O, so it will bail out by checking
         * zram_meta_get.
         */
        zram_meta_put(zram);
        /*
         * We want to free zram_meta in process context to avoid
         * deadlock between reclaim path and any other locks.
         */
        wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);

        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));
        zram->disksize = 0;

        set_capacity(zram->disk, 0);
        part_stat_set_all(&zram->disk->part0, 0);

        up_write(&zram->init_lock);
        /* I/O operations on all CPUs are done, so let's free */
        zram_meta_free(meta, disksize);
        zcomp_destroy(comp);
}

static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 disksize;
        struct zcomp *comp;
        struct zram_meta *meta;
        struct zram *zram = dev_to_zram(dev);
        int err;

        disksize = memparse(buf, NULL);
        if (!disksize)
                return -EINVAL;

        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(zram->disk->disk_name, disksize);
        if (!meta)
                return -ENOMEM;

        comp = zcomp_create(zram->compressor);
        if (IS_ERR(comp)) {
                pr_err("Cannot initialise %s compressing backend\n",
                                zram->compressor);
                err = PTR_ERR(comp);
                goto out_free_meta;
        }

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                pr_info("Cannot change disksize for initialized device\n");
                err = -EBUSY;
                goto out_destroy_comp;
        }

        init_waitqueue_head(&zram->io_done);
        atomic_set(&zram->refcount, 1);
        zram->meta = meta;
        zram->comp = comp;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        up_write(&zram->init_lock);

        /*
         * Revalidate disk out of the init_lock to avoid lockdep splat.
         * It's okay because disk's capacity is protected by init_lock
         * so that revalidate_disk always sees up-to-date capacity.
         */
        revalidate_disk(zram->disk);

        return len;

out_destroy_comp:
        up_write(&zram->init_lock);
        zcomp_destroy(comp);
out_free_meta:
        zram_meta_free(meta, disksize);
        return err;
}
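
/*
 * Typical device bring-up from user space, per the zram documentation
 * (comp_algorithm must be set before disksize):
 *
 *      echo lz4 > /sys/block/zram0/comp_algorithm
 *      echo 1G > /sys/block/zram0/disksize
 *      mkswap /dev/zram0 && swapon /dev/zram0
 */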

static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int ret;
        unsigned short do_reset;
        struct zram *zram;
        struct block_device *bdev;

        ret = kstrtou16(buf, 10, &do_reset);
        if (ret)
                return ret;

        if (!do_reset)
                return -EINVAL;

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);
        if (!bdev)
                return -ENOMEM;

        mutex_lock(&bdev->bd_mutex);
        /* Do not reset an active device or claimed device */
        if (bdev->bd_openers || zram->claim) {
                mutex_unlock(&bdev->bd_mutex);
                bdput(bdev);
                return -EBUSY;
        }

        /* From now on, no one can open /dev/zram[0-9] */
        zram->claim = true;
        mutex_unlock(&bdev->bd_mutex);

        /* Make sure all the pending I/O are finished */
        fsync_bdev(bdev);
        zram_reset_device(zram);
        revalidate_disk(zram->disk);
        bdput(bdev);

        mutex_lock(&bdev->bd_mutex);
        zram->claim = false;
        mutex_unlock(&bdev->bd_mutex);

        return len;
}
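
/*
 * The device must be idle before a reset, e.g. (per the zram
 * documentation):
 *
 *      swapoff /dev/zram0          # or umount, for a block device
 *      echo 1 > /sys/block/zram0/reset
 */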

static int zram_open(struct block_device *bdev, fmode_t mode)
{
        int ret = 0;
        struct zram *zram;

        WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

        zram = bdev->bd_disk->private_data;
        /* zram was claimed to reset so open request fails */
        if (zram->claim)
                ret = -EBUSY;

        return ret;
}

static const struct block_device_operations zram_devops = {
        .open = zram_open,
        .swap_slot_free_notify = zram_slot_free_notify,
        .rw_page = zram_rw_page,
        .owner = THIS_MODULE
};

static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_RO(orig_data_size);
static DEVICE_ATTR_RO(mem_used_total);
static DEVICE_ATTR_RW(mem_limit);
static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_num_reads.attr,
        &dev_attr_num_writes.attr,
        &dev_attr_failed_reads.attr,
        &dev_attr_failed_writes.attr,
        &dev_attr_compact.attr,
        &dev_attr_invalid_io.attr,
        &dev_attr_notify_free.attr,
        &dev_attr_zero_pages.attr,
        &dev_attr_orig_data_size.attr,
        &dev_attr_compr_data_size.attr,
        &dev_attr_mem_used_total.attr,
        &dev_attr_mem_limit.attr,
        &dev_attr_mem_used_max.attr,
        &dev_attr_max_comp_streams.attr,
        &dev_attr_comp_algorithm.attr,
        &dev_attr_io_stat.attr,
        &dev_attr_mm_stat.attr,
        &dev_attr_debug_stat.attr,
        NULL,
};

static struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};
1219
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001220/*
1221 * Allocate and initialize new zram device. the function returns
1222 * '>= 0' device_id upon success, and negative value otherwise.
1223 */
1224static int zram_add(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301225{
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001226 struct zram *zram;
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001227 struct request_queue *queue;
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001228 int ret, device_id;
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001229
1230 zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
1231 if (!zram)
1232 return -ENOMEM;
1233
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001234 ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001235 if (ret < 0)
1236 goto out_free_dev;
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001237 device_id = ret;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301238
Jerome Marchand0900bea2011-09-06 15:02:11 +02001239 init_rwsem(&zram->init_lock);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301240
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001241 queue = blk_alloc_queue(GFP_KERNEL);
1242 if (!queue) {
Nitin Gupta306b0c92009-09-22 10:26:53 +05301243 pr_err("Error allocating disk queue for device %d\n",
1244 device_id);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001245 ret = -ENOMEM;
1246 goto out_free_idr;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301247 }
1248
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001249 blk_queue_make_request(queue, zram_make_request);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301250
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001251 /* gendisk structure */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301252 zram->disk = alloc_disk(1);
1253 if (!zram->disk) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001254 pr_err("Error allocating disk structure for device %d\n",
Nitin Gupta306b0c92009-09-22 10:26:53 +05301255 device_id);
Julia Lawall201c7b72015-04-15 16:16:27 -07001256 ret = -ENOMEM;
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001257 goto out_free_queue;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301258 }
1259
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301260 zram->disk->major = zram_major;
1261 zram->disk->first_minor = device_id;
1262 zram->disk->fops = &zram_devops;
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001263 zram->disk->queue = queue;
1264 zram->disk->queue->queuedata = zram;
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301265 zram->disk->private_data = zram;
1266 snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301267
Nitin Gupta33863c22010-08-09 22:56:47 +05301268	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301269 set_capacity(zram->disk, 0);
Sergey Senozhatskyb67d1ec2014-04-07 15:38:09 -07001270	/* zram devices sort of resemble non-rotational disks */
1271 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
Mike Snitzerb277da02014-10-04 10:55:32 -06001272 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
Nitin Guptaa1dd52a2010-06-01 13:31:23 +05301273 /*
 1274	 * To ensure that we always get PAGE_SIZE-aligned
 1275	 * and n*PAGE_SIZE-sized I/O requests.
1276 */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301277 blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
Robert Jennings7b19b8d2011-01-28 08:58:17 -06001278 blk_queue_logical_block_size(zram->disk->queue,
1279 ZRAM_LOGICAL_BLOCK_SIZE);
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301280 blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
1281 blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
Joonsoo Kimf4659d82014-04-07 15:38:24 -07001282 zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
Jens Axboe2bb4cd52015-07-14 08:15:12 -06001283 blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
Joonsoo Kimf4659d82014-04-07 15:38:24 -07001284 /*
 1285	 * zram_bio_discard() will clear all logical blocks if the logical
 1286	 * block size is identical to the physical block size (PAGE_SIZE).
 1287	 * But if they differ, we skip discarding the parts of logical
 1288	 * blocks that lie in the unaligned portion of the request range,
 1289	 * so we can't guarantee that all discarded logical blocks are
 1290	 * zeroed.
1291 */
1292 if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
1293 zram->disk->queue->limits.discard_zeroes_data = 1;
1294 else
1295 zram->disk->queue->limits.discard_zeroes_data = 0;
1296 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
Nitin Gupta5d83d5a2010-01-28 21:13:39 +05301297
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301298 add_disk(zram->disk);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301299
Nitin Gupta33863c22010-08-09 22:56:47 +05301300 ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
1301 &zram_disk_attr_group);
1302 if (ret < 0) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001303 pr_err("Error creating sysfs group for device %d\n",
1304 device_id);
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001305 goto out_free_disk;
Nitin Gupta33863c22010-08-09 22:56:47 +05301306 }
Sergey Senozhatskye46b8a02014-04-07 15:38:17 -07001307 strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
Sergey Senozhatskybe2d1d52014-04-07 15:38:00 -07001308 zram->meta = NULL;
Sergey Senozhatskyd12b63c2015-06-25 15:00:14 -07001309
1310 pr_info("Added device: %s\n", zram->disk->disk_name);
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001311 return device_id;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301312
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001313out_free_disk:
1314 del_gendisk(zram->disk);
1315 put_disk(zram->disk);
1316out_free_queue:
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001317 blk_cleanup_queue(queue);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001318out_free_idr:
1319 idr_remove(&zram_index_idr, device_id);
1320out_free_dev:
1321 kfree(zram);
Nitin Guptade1a21a2010-01-28 21:13:40 +05301322 return ret;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301323}
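/*
 * zram_add() is called in two contexts: at module init, once per
 * requested num_devices, and on demand from the hot_add class
 * attribute below.
 */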
1324
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001325static int zram_remove(struct zram *zram)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301326{
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001327 struct block_device *bdev;
1328
1329 bdev = bdget_disk(zram->disk, 0);
1330 if (!bdev)
1331 return -ENOMEM;
1332
1333 mutex_lock(&bdev->bd_mutex);
1334 if (bdev->bd_openers || zram->claim) {
1335 mutex_unlock(&bdev->bd_mutex);
1336 bdput(bdev);
1337 return -EBUSY;
1338 }
1339
1340 zram->claim = true;
1341 mutex_unlock(&bdev->bd_mutex);
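	/*
	 * From here on the device is claimed: new open attempts fail
	 * with -EBUSY (zram->claim is tested under bd_mutex in
	 * zram_open(), earlier in this file), so teardown can't race
	 * new users.
	 */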
1342
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001343 /*
 1344	 * Remove sysfs first, so no one can perform a disksize
 1345	 * store while we destroy the device. This also helps during
 1346	 * hot_remove -- zram_reset_device() is the last holder of
 1347	 * ->init_lock, so no later/concurrent disksize_store() or any
 1348	 * other sysfs handler is possible.
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001349 */
1350 sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
1351 &zram_disk_attr_group);
Nitin Gupta33863c22010-08-09 22:56:47 +05301352
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001353	/* Make sure all pending I/O is finished */
1354 fsync_bdev(bdev);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001355 zram_reset_device(zram);
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001356 bdput(bdev);
1357
1358 pr_info("Removed device: %s\n", zram->disk->disk_name);
1359
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001360 blk_cleanup_queue(zram->disk->queue);
1361 del_gendisk(zram->disk);
1362 put_disk(zram->disk);
1363 kfree(zram);
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001364 return 0;
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001365}
Nitin Gupta306b0c92009-09-22 10:26:53 +05301366
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001367/* zram-control sysfs attributes */
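/*
 * The attributes below live under /sys/class/zram-control/.  A usage
 * sketch (device ids are illustrative; paths per the zram docs):
 *
 *	cat /sys/class/zram-control/hot_add	# creates a device, prints its id
 *	echo 4 > /sys/class/zram-control/hot_remove
 *
 * Note that hot_add is read-only: the show() handler itself allocates
 * the new device and returns its id.
 */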
1368static ssize_t hot_add_show(struct class *class,
1369 struct class_attribute *attr,
1370 char *buf)
1371{
1372 int ret;
1373
1374 mutex_lock(&zram_index_mutex);
1375 ret = zram_add();
1376 mutex_unlock(&zram_index_mutex);
1377
1378 if (ret < 0)
1379 return ret;
1380 return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
1381}
1382
1383static ssize_t hot_remove_store(struct class *class,
1384 struct class_attribute *attr,
1385 const char *buf,
1386 size_t count)
1387{
1388 struct zram *zram;
1389 int ret, dev_id;
1390
1391 /* dev_id is gendisk->first_minor, which is `int' */
1392 ret = kstrtoint(buf, 10, &dev_id);
1393 if (ret)
1394 return ret;
1395 if (dev_id < 0)
1396 return -EINVAL;
1397
1398 mutex_lock(&zram_index_mutex);
1399
1400 zram = idr_find(&zram_index_idr, dev_id);
Jerome Marchand17ec4cd2016-01-15 16:54:48 -08001401 if (zram) {
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001402 ret = zram_remove(zram);
Jerome Marchand17ec4cd2016-01-15 16:54:48 -08001403 idr_remove(&zram_index_idr, dev_id);
1404 } else {
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001405 ret = -ENODEV;
Jerome Marchand17ec4cd2016-01-15 16:54:48 -08001406 }
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001407
1408 mutex_unlock(&zram_index_mutex);
1409 return ret ? ret : count;
1410}
1411
1412static struct class_attribute zram_control_class_attrs[] = {
1413 __ATTR_RO(hot_add),
1414 __ATTR_WO(hot_remove),
1415 __ATTR_NULL,
1416};
1417
1418static struct class zram_control_class = {
1419 .name = "zram-control",
1420 .owner = THIS_MODULE,
1421 .class_attrs = zram_control_class_attrs,
1422};
1423
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001424static int zram_remove_cb(int id, void *ptr, void *data)
1425{
1426 zram_remove(ptr);
1427 return 0;
1428}
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001429
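/*
 * Teardown order matters: unregister the zram-control class first so
 * no new hot_add/hot_remove requests can race device destruction,
 * then remove every remaining device through the idr, and only then
 * give back the major number.
 */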
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001430static void destroy_devices(void)
1431{
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001432 class_unregister(&zram_control_class);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001433 idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
1434 idr_destroy(&zram_index_idr);
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001435 unregister_blkdev(zram_major, "zram");
Nitin Gupta306b0c92009-09-22 10:26:53 +05301436}
1437
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301438static int __init zram_init(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301439{
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001440 int ret;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301441
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001442 ret = class_register(&zram_control_class);
1443 if (ret) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001444 pr_err("Unable to register zram-control class\n");
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001445 return ret;
1446 }
1447
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301448 zram_major = register_blkdev(0, "zram");
1449 if (zram_major <= 0) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001450 pr_err("Unable to get major number\n");
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001451 class_unregister(&zram_control_class);
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001452 return -EBUSY;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301453 }
1454
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001455 while (num_devices != 0) {
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001456 mutex_lock(&zram_index_mutex);
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001457 ret = zram_add();
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001458 mutex_unlock(&zram_index_mutex);
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001459 if (ret < 0)
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001460 goto out_error;
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001461 num_devices--;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301462 }
1463
Nitin Gupta306b0c92009-09-22 10:26:53 +05301464 return 0;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301465
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001466out_error:
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001467 destroy_devices();
Nitin Gupta306b0c92009-09-22 10:26:53 +05301468 return ret;
1469}
1470
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301471static void __exit zram_exit(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301472{
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001473 destroy_devices();
Nitin Gupta306b0c92009-09-22 10:26:53 +05301474}
1475
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301476module_init(zram_init);
1477module_exit(zram_exit);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301478
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001479module_param(num_devices, uint, 0);
Sergey Senozhatskyc3cdb402015-06-25 15:00:11 -07001480MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
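/*
 * Example (sizes and ids are illustrative): load the module with four
 * pre-created devices, then size and use the first one as swap:
 *
 *	modprobe zram num_devices=4
 *	echo 512M > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 */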
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001481
Nitin Gupta306b0c92009-09-22 10:26:53 +05301482MODULE_LICENSE("Dual BSD/GPL");
1483MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301484MODULE_DESCRIPTION("Compressed RAM Block Device");