/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

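/*
 * ZRAM_ATTR_RO(name) generates a read-only sysfs show handler that prints
 * the 64-bit statistics counter zram->stats.<name> for the device.
 */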
#define ZRAM_ATTR_RO(name) \
static ssize_t name##_show(struct device *d,		\
				struct device_attribute *attr, char *b) \
{								\
	struct zram *zram = dev_to_zram(d);			\
	return scnprintf(b, PAGE_SIZE, "%llu\n",		\
		(u64)atomic64_read(&zram->stats.name));	\
} \
static DEVICE_ATTR_RO(name);

static inline int init_done(struct zram *zram)
{
	return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}

/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;

		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

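/*
 * Allocate the per-device metadata: the table that maps each disk page to
 * a zsmalloc handle, and the zsmalloc pool that backs the compressed data.
 * Returns NULL on any allocation failure.
 */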
static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}

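/* Advance (index, offset) past the bytes covered by this bio_vec. */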
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

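/* Return 1 if the page at @ptr contains only zero bytes, 0 otherwise. */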
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}


/*
 * To protect concurrent access to the same index entry,
 * caller should hold this table index entry's bit_spinlock to
 * indicate this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}

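/*
 * Read the data for @bvec from the compressed store. Zero-filled and
 * unallocated pages are served by handle_zero_page(); partial reads go
 * through a temporary buffer so only the requested bytes are copied out.
 */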
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

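/*
 * Record a new high-water mark of allocated pages using a cmpxchg loop,
 * so concurrent writers never move stats.max_used_pages backwards.
 */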
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	int old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

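/*
 * Write the data in @bvec at page @index. Zero-filled pages only set
 * ZRAM_ZERO; other data is compressed into a zsmalloc object (stored
 * uncompressed if the result exceeds max_zpage_size). Partial writes
 * first decompress the old page and merge the new bytes into it.
 */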
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	update_used_max(zram, alloced_pages);

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

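/* Dispatch a single bio_vec to the read or write path and update stats. */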
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	int ret;

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

/*
 * zram_bio_discard - handler for discard requests
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical with physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block. Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory. So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}

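/*
 * Tear down an initialized device: destroy the compression backend, free
 * the metadata and reset statistics. Disk capacity is cleared and the
 * disk revalidated only when @reset_capacity is true.
 */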
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	zcomp_destroy(zram->comp);
	zram->max_comp_streams = 1;
	zram_meta_free(zram->meta, zram->disksize);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);

	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	if (reset_capacity)
		revalidate_disk(zram->disk);
}

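/*
 * Writing disksize via sysfs initializes the device: allocate the metadata
 * and the compression backend first, then publish them under init_lock and
 * set the disk capacity.
 */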
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta, disksize);
	return err;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

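/*
 * Walk the bio segment by segment and service it with zram_bvec_rw(),
 * splitting any segment that crosses a PAGE_SIZE boundary. Discard
 * requests are handled separately by zram_bio_discard().
 */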
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset, rw;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram)))
		goto error;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

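/*
 * Called when swap frees a slot on this device, so the backing
 * compressed object can be released immediately.
 */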
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

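/*
 * rw_page entry point: build a one-page bio_vec for @page and reuse the
 * regular zram_bvec_rw() path, avoiding bio allocation for the caller.
 */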
static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	int offset, err;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;
	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		return -EINVAL;
	}

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram))) {
		err = -EIO;
		goto out_unlock;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, rw);
out_unlock:
	up_read(&zram->init_lock);
	/*
	 * If the I/O fails, just return the error (i.e. non-zero) without
	 * calling page_endio. The callers of rw_page (e.g. swap_readpage,
	 * __swap_writepage) will then resubmit the I/O as a bio request,
	 * and bio->bi_end_io handles the error (e.g. SetPageError,
	 * set_page_dirty and other cleanup).
	 */
	if (err == 0)
		page_endio(page, rw, 0);
	return err;
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};

static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_RO(orig_data_size);
static DEVICE_ATTR_RO(mem_used_total);
static DEVICE_ATTR_RW(mem_limit);
static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

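/*
 * Create one zram block device: allocate the request queue and gendisk,
 * set queue limits (PAGE_SIZE aligned I/O, discard support) and register
 * the sysfs attribute group.
 */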
static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical with physical block size(PAGE_SIZE). But if it is
	 * different, we will skip discarding some parts of logical blocks in
	 * the part of the request range which isn't aligned to physical block
	 * size. So we can't ensure that all discarded logical blocks are
	 * zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		zram->disk->queue->limits.discard_zeroes_data = 1;
	else
		zram->disk->queue->limits.discard_zeroes_data = 0;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
	zram->meta = NULL;
	zram->max_comp_streams = 1;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");