/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"
#include "dm-bio-record.h"
#include "dm-cache-metadata.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache"

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
	"A percentage of time allocated for copying to and/or from cache");

/*----------------------------------------------------------------*/

#define IOT_RESOLUTION 4

struct io_tracker {
	spinlock_t lock;

	/*
	 * Sectors of in-flight IO.
	 */
	sector_t in_flight;

	/*
	 * The time, in jiffies, when this device became idle (if it is
	 * indeed idle).
	 */
	unsigned long idle_time;
	unsigned long last_update_time;
};

static void iot_init(struct io_tracker *iot)
{
	spin_lock_init(&iot->lock);
	iot->in_flight = 0ul;
	iot->idle_time = 0ul;
	iot->last_update_time = jiffies;
}

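/*
 * Functions prefixed with __ expect iot->lock to be held by the caller.
 */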
static bool __iot_idle_for(struct io_tracker *iot, unsigned long jifs)
{
	if (iot->in_flight)
		return false;

	return time_after(jiffies, iot->idle_time + jifs);
}

static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&iot->lock, flags);
	r = __iot_idle_for(iot, jifs);
	spin_unlock_irqrestore(&iot->lock, flags);

	return r;
}

static void iot_io_begin(struct io_tracker *iot, sector_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&iot->lock, flags);
	iot->in_flight += len;
	spin_unlock_irqrestore(&iot->lock, flags);
}

static void __iot_io_end(struct io_tracker *iot, sector_t len)
{
	iot->in_flight -= len;
	if (!iot->in_flight)
		iot->idle_time = jiffies;
}

static void iot_io_end(struct io_tracker *iot, sector_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&iot->lock, flags);
	__iot_io_end(iot, len);
	spin_unlock_irqrestore(&iot->lock, flags);
}

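/*
 * A sketch of typical usage, mirroring accounted_begin() and
 * accounted_complete() further down this file:
 *
 *	iot_io_begin(&cache->origin_tracker, bio_sectors(bio));
 *	... submit the bio ...
 *	iot_io_end(&cache->origin_tracker, pb->len);
 *
 * iot_idle_for(&cache->origin_tracker, HZ) then reports whether the origin
 * has been free of tracked IO for at least a second.
 */
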
/*----------------------------------------------------------------*/

/*
 * Glossary:
 *
 * oblock: index of an origin block
 * cblock: index of a cache block
 * promotion: movement of a block from origin to cache
 * demotion: movement of a block from cache to origin
 * migration: movement of a block between the origin and cache device,
 *	      either direction
 */

/*----------------------------------------------------------------*/

/*
 * There are a couple of places where we let a bio run, but want to do some
 * work before calling its endio function.  We do this by temporarily
 * changing the endio fn.
 */
struct dm_hook_info {
	bio_end_io_t *bi_end_io;
};

static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
			bio_end_io_t *bi_end_io, void *bi_private)
{
	h->bi_end_io = bio->bi_end_io;

	bio->bi_end_io = bi_end_io;
	bio->bi_private = bi_private;
}

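/*
 * Restores the endio fn saved by dm_hook_bio().  Note that bi_private is
 * not restored.
 */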
static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{
	bio->bi_end_io = h->bi_end_io;
}

/*----------------------------------------------------------------*/

#define MIGRATION_POOL_SIZE 128
#define COMMIT_PERIOD HZ
#define MIGRATION_COUNT_WINDOW 10

/*
 * The block size of the device holding cache data must be
 * between 32KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

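/*
 * The order of these values matters: checks such as
 * get_cache_mode(cache) >= CM_READ_ONLY below rely on
 * CM_WRITE < CM_READ_ONLY < CM_FAIL.
 */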
enum cache_metadata_mode {
	CM_WRITE,		/* metadata may be changed */
	CM_READ_ONLY,		/* metadata may not be changed */
	CM_FAIL
};

enum cache_io_mode {
	/*
	 * Data is written to cached blocks only.  These blocks are marked
	 * dirty.  If you lose the cache device you will lose data.
	 * Potential performance increase for both reads and writes.
	 */
	CM_IO_WRITEBACK,

	/*
	 * Data is written to both cache and origin.  Blocks are never
	 * dirty.  Potential performance benefit for reads only.
	 */
	CM_IO_WRITETHROUGH,

	/*
	 * A degraded mode useful for various cache coherency situations
	 * (eg, rolling back snapshots).  Reads and writes always go to the
	 * origin.  If a write goes to a cached oblock, then the cache
	 * block is invalidated.
	 */
	CM_IO_PASSTHROUGH
};

struct cache_features {
	enum cache_metadata_mode mode;
	enum cache_io_mode io_mode;
	unsigned metadata_version;
};

struct cache_stats {
	atomic_t read_hit;
	atomic_t read_miss;
	atomic_t write_hit;
	atomic_t write_miss;
	atomic_t demotion;
	atomic_t promotion;
	atomic_t copies_avoided;
	atomic_t cache_cell_clash;
	atomic_t commit_count;
	atomic_t discard_count;
};

/*
 * Defines a range of cblocks: blocks from begin to (end - 1) are in the
 * range; end is the one-past-the-end value.
 */
struct cblock_range {
	dm_cblock_t begin;
	dm_cblock_t end;
};

struct invalidation_request {
	struct list_head list;
	struct cblock_range *cblocks;

	atomic_t complete;
	int err;

	wait_queue_head_t result_wait;
};

struct cache {
	struct dm_target *ti;
	struct dm_target_callbacks callbacks;

	struct dm_cache_metadata *cmd;

	/*
	 * Metadata is written to this device.
	 */
	struct dm_dev *metadata_dev;

	/*
	 * The slower of the two data devices.  Typically a spindle.
	 */
	struct dm_dev *origin_dev;

	/*
	 * The faster of the two data devices.  Typically an SSD.
	 */
	struct dm_dev *cache_dev;

	/*
	 * Size of the origin device in _complete_ blocks and native sectors.
	 */
	dm_oblock_t origin_blocks;
	sector_t origin_sectors;

	/*
	 * Size of the cache device in blocks.
	 */
	dm_cblock_t cache_size;

	/*
	 * Fields for converting from sectors to blocks.
	 */
	sector_t sectors_per_block;
	int sectors_per_block_shift;

	spinlock_t lock;
	struct list_head deferred_cells;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct bio_list deferred_writethrough_bios;
	struct list_head quiesced_migrations;
	struct list_head completed_migrations;
	struct list_head need_commit_migrations;
	sector_t migration_threshold;
	wait_queue_head_t migration_wait;
	atomic_t nr_allocated_migrations;

	/*
	 * The number of in flight migrations that are performing
	 * background io.  eg, promotion, writeback.
	 */
	atomic_t nr_io_migrations;

	wait_queue_head_t quiescing_wait;
	atomic_t quiescing;
	atomic_t quiescing_ack;

	/*
	 * cache_size entries, dirty if set
	 */
	atomic_t nr_dirty;
	unsigned long *dirty_bitset;

	/*
	 * origin_blocks entries, discarded if set.
	 */
	dm_dblock_t discard_nr_blocks;
	unsigned long *discard_bitset;
	uint32_t discard_block_size; /* a power of 2 times sectors per block */

	/*
	 * Rather than reconstructing the table line for the status we just
	 * save it and regurgitate.
	 */
	unsigned nr_ctr_args;
	const char **ctr_args;

	struct dm_kcopyd_client *copier;
	struct workqueue_struct *wq;
	struct work_struct worker;

	struct delayed_work waker;
	unsigned long last_commit_jiffies;

	struct dm_bio_prison *prison;
	struct dm_deferred_set *all_io_ds;

	mempool_t *migration_pool;

	struct dm_cache_policy *policy;
	unsigned policy_nr_args;

	bool need_tick_bio:1;
	bool sized:1;
	bool invalidate:1;
	bool commit_requested:1;
	bool loaded_mappings:1;
	bool loaded_discards:1;

	/*
	 * Cache features such as write-through.
	 */
	struct cache_features features;

	struct cache_stats stats;

	/*
	 * Invalidation fields.
	 */
	spinlock_t invalidation_lock;
	struct list_head invalidation_requests;

	struct io_tracker origin_tracker;
};

struct per_bio_data {
	bool tick:1;
	unsigned req_nr:2;
	struct dm_deferred_entry *all_io_entry;
	struct dm_hook_info hook_info;
	sector_t len;

	/*
	 * writethrough fields.  These MUST remain at the end of this
	 * structure and the 'cache' member must be the first as it
	 * is used to determine the offset of the writethrough fields.
	 */
	struct cache *cache;
	dm_cblock_t cblock;
	struct dm_bio_details bio_details;
};

struct dm_cache_migration {
	struct list_head list;
	struct cache *cache;

	unsigned long start_jiffies;
	dm_oblock_t old_oblock;
	dm_oblock_t new_oblock;
	dm_cblock_t cblock;

	bool err:1;
	bool discard:1;
	bool writeback:1;
	bool demote:1;
	bool promote:1;
	bool requeue_holder:1;
	bool invalidate:1;

	struct dm_bio_prison_cell *old_ocell;
	struct dm_bio_prison_cell *new_ocell;
};

/*
 * Processing a bio in the worker thread may require these memory
 * allocations.  We prealloc to avoid deadlocks (the same worker thread
 * frees them back to the mempool).
 */
struct prealloc {
	struct dm_cache_migration *mg;
	struct dm_bio_prison_cell *cell1;
	struct dm_bio_prison_cell *cell2;
};

static enum cache_metadata_mode get_cache_mode(struct cache *cache);

static void wake_worker(struct cache *cache)
{
	queue_work(cache->wq, &cache->worker);
}

/*----------------------------------------------------------------*/

static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
{
	/* FIXME: change to use a local slab. */
	return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
}

static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
{
	dm_bio_prison_free_cell(cache->prison, cell);
}

static struct dm_cache_migration *alloc_migration(struct cache *cache)
{
	struct dm_cache_migration *mg;

	mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
	if (mg) {
		mg->cache = cache;
		atomic_inc(&mg->cache->nr_allocated_migrations);
	}

	return mg;
}

static void free_migration(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;

	if (atomic_dec_and_test(&cache->nr_allocated_migrations))
		wake_up(&cache->migration_wait);

	mempool_free(mg, cache->migration_pool);
}

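/*
 * Top up any missing members of the prealloc struct.  Returns -ENOMEM if
 * any allocation fails; members that were already allocated are kept, so
 * a later retry only has to fill in the gaps.
 */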
static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
{
	if (!p->mg) {
		p->mg = alloc_migration(cache);
		if (!p->mg)
			return -ENOMEM;
	}

	if (!p->cell1) {
		p->cell1 = alloc_prison_cell(cache);
		if (!p->cell1)
			return -ENOMEM;
	}

	if (!p->cell2) {
		p->cell2 = alloc_prison_cell(cache);
		if (!p->cell2)
			return -ENOMEM;
	}

	return 0;
}

static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
{
	if (p->cell2)
		free_prison_cell(cache, p->cell2);

	if (p->cell1)
		free_prison_cell(cache, p->cell1);

	if (p->mg)
		free_migration(p->mg);
}

static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
{
	struct dm_cache_migration *mg = p->mg;

	BUG_ON(!mg);
	p->mg = NULL;

	return mg;
}

/*
 * You must have a cell within the prealloc struct to return.  If not, this
 * function will BUG() rather than returning NULL.
 */
static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
{
	struct dm_bio_prison_cell *r = NULL;

	if (p->cell1) {
		r = p->cell1;
		p->cell1 = NULL;

	} else if (p->cell2) {
		r = p->cell2;
		p->cell2 = NULL;
	} else
		BUG();

	return r;
}

/*
 * You can't have more than two cells in a prealloc struct.  BUG() will be
 * called if you try to overfill.
 */
static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
{
	if (!p->cell2)
		p->cell2 = cell;

	else if (!p->cell1)
		p->cell1 = cell;

	else
		BUG();
}

/*----------------------------------------------------------------*/

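/*
 * Builds a prison key covering the half-open range of origin blocks
 * [begin, end).
 */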
static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = 0;
	key->block_begin = from_oblock(begin);
	key->block_end = from_oblock(end);
}

/*
 * The caller hands in a preallocated cell, and a free function for it.
 * The cell will be freed if there's an error, or if it wasn't used because
 * a cell with that key already exists.
 */
typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);

static int bio_detain_range(struct cache *cache, dm_oblock_t oblock_begin, dm_oblock_t oblock_end,
			    struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
			    cell_free_fn free_fn, void *free_context,
			    struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;

	build_key(oblock_begin, oblock_end, &key);
	r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
	if (r)
		free_fn(free_context, cell_prealloc);

	return r;
}

static int bio_detain(struct cache *cache, dm_oblock_t oblock,
		      struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
		      cell_free_fn free_fn, void *free_context,
		      struct dm_bio_prison_cell **cell_result)
{
	dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
	return bio_detain_range(cache, oblock, end, bio,
				cell_prealloc, free_fn, free_context, cell_result);
}

static int get_cell(struct cache *cache,
		    dm_oblock_t oblock,
		    struct prealloc *structs,
		    struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;
	struct dm_bio_prison_cell *cell_prealloc;

	cell_prealloc = prealloc_get_cell(structs);

	build_key(oblock, to_oblock(from_oblock(oblock) + 1ULL), &key);
	r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
	if (r)
		prealloc_put_cell(structs, cell_prealloc);

	return r;
}

/*----------------------------------------------------------------*/

static bool is_dirty(struct cache *cache, dm_cblock_t b)
{
	return test_bit(from_cblock(b), cache->dirty_bitset);
}

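/*
 * set_dirty()/clear_dirty() keep the dirty bitset, the nr_dirty counter
 * and the policy's view of dirtiness in step.
 */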
static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
		atomic_inc(&cache->nr_dirty);
		policy_set_dirty(cache->policy, oblock);
	}
}

static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
		policy_clear_dirty(cache->policy, oblock);
		if (atomic_dec_return(&cache->nr_dirty) == 0)
			dm_table_event(cache->ti->table);
	}
}

/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct cache *cache)
{
	return cache->sectors_per_block_shift >= 0;
}

/* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
#if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
__always_inline
#endif
static dm_block_t block_div(dm_block_t b, uint32_t n)
{
	do_div(b, n);

	return b;
}

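/*
 * Number of origin blocks that make up a single discard block.
 */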
static dm_block_t oblocks_per_dblock(struct cache *cache)
{
	dm_block_t oblocks = cache->discard_block_size;

	if (block_size_is_power_of_two(cache))
		oblocks >>= cache->sectors_per_block_shift;
	else
		oblocks = block_div(oblocks, cache->sectors_per_block);

	return oblocks;
}

static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
{
	return to_dblock(block_div(from_oblock(oblock),
				   oblocks_per_dblock(cache)));
}

static dm_oblock_t dblock_to_oblock(struct cache *cache, dm_dblock_t dblock)
{
	return to_oblock(from_dblock(dblock) * oblocks_per_dblock(cache));
}

static void set_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
	atomic_inc(&cache->stats.discard_count);

	spin_lock_irqsave(&cache->lock, flags);
	set_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void clear_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	clear_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
		     cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

/*----------------------------------------------------------------*/

static void load_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	dm_cache_metadata_get_stats(cache->cmd, &stats);
	atomic_set(&cache->stats.read_hit, stats.read_hits);
	atomic_set(&cache->stats.read_miss, stats.read_misses);
	atomic_set(&cache->stats.write_hit, stats.write_hits);
	atomic_set(&cache->stats.write_miss, stats.write_misses);
}

static void save_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return;

	stats.read_hits = atomic_read(&cache->stats.read_hit);
	stats.read_misses = atomic_read(&cache->stats.read_miss);
	stats.write_hits = atomic_read(&cache->stats.write_hit);
	stats.write_misses = atomic_read(&cache->stats.write_miss);

	dm_cache_metadata_set_stats(cache->cmd, &stats);
}

/*----------------------------------------------------------------
 * Per bio data
 *--------------------------------------------------------------*/

/*
 * If using writeback, leave out struct per_bio_data's writethrough fields.
 */
#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))

static bool writethrough_mode(struct cache_features *f)
{
	return f->io_mode == CM_IO_WRITETHROUGH;
}

static bool writeback_mode(struct cache_features *f)
{
	return f->io_mode == CM_IO_WRITEBACK;
}

static bool passthrough_mode(struct cache_features *f)
{
	return f->io_mode == CM_IO_PASSTHROUGH;
}

static size_t get_per_bio_data_size(struct cache *cache)
{
	return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
}

static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
	BUG_ON(!pb);
	return pb;
}

static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = get_per_bio_data(bio, data_size);

	pb->tick = false;
	pb->req_nr = dm_bio_get_target_bio_nr(bio);
	pb->all_io_entry = NULL;
	pb->len = 0;

	return pb;
}

/*----------------------------------------------------------------
 * Remapping
 *--------------------------------------------------------------*/
static void remap_to_origin(struct cache *cache, struct bio *bio)
{
	bio->bi_bdev = cache->origin_dev->bdev;
}

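/*
 * Remaps to the cache device, preserving the bio's offset within the
 * block.
 */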
static void remap_to_cache(struct cache *cache, struct bio *bio,
			   dm_cblock_t cblock)
{
	sector_t bi_sector = bio->bi_iter.bi_sector;
	sector_t block = from_cblock(cblock);

	bio->bi_bdev = cache->cache_dev->bdev;
	if (!block_size_is_power_of_two(cache))
		bio->bi_iter.bi_sector =
			(block * cache->sectors_per_block) +
			sector_div(bi_sector, cache->sectors_per_block);
	else
		bio->bi_iter.bi_sector =
			(block << cache->sectors_per_block_shift) |
			(bi_sector & (cache->sectors_per_block - 1));
}

static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
	unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	spin_lock_irqsave(&cache->lock, flags);
	if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
	    bio_op(bio) != REQ_OP_DISCARD) {
		pb->tick = true;
		cache->need_tick_bio = false;
	}
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
					  dm_oblock_t oblock)
{
	check_if_tick_bio_needed(cache, bio);
	remap_to_origin(cache, bio);
	if (bio_data_dir(bio) == WRITE)
		clear_discard(cache, oblock_to_dblock(cache, oblock));
}

static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
				 dm_oblock_t oblock, dm_cblock_t cblock)
{
	check_if_tick_bio_needed(cache, bio);
	remap_to_cache(cache, bio, cblock);
	if (bio_data_dir(bio) == WRITE) {
		set_dirty(cache, oblock, cblock);
		clear_discard(cache, oblock_to_dblock(cache, oblock));
	}
}

static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (!block_size_is_power_of_two(cache))
		(void) sector_div(block_nr, cache->sectors_per_block);
	else
		block_nr >>= cache->sectors_per_block_shift;

	return to_oblock(block_nr);
}

/*
 * You must increment the deferred set whilst the prison cell is held.  To
 * encourage this, we ask for 'cell' to be passed in.
 */
static void inc_ds(struct cache *cache, struct bio *bio,
		   struct dm_bio_prison_cell *cell)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	BUG_ON(!cell);
	BUG_ON(pb->all_io_entry);

	pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
}

static bool accountable_bio(struct cache *cache, struct bio *bio)
{
	return ((bio->bi_bdev == cache->origin_dev->bdev) &&
		bio_op(bio) != REQ_OP_DISCARD);
}

static void accounted_begin(struct cache *cache, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	if (accountable_bio(cache, bio)) {
		pb->len = bio_sectors(bio);
		iot_io_begin(&cache->origin_tracker, pb->len);
	}
}

static void accounted_complete(struct cache *cache, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	iot_io_end(&cache->origin_tracker, pb->len);
}

static void accounted_request(struct cache *cache, struct bio *bio)
{
	accounted_begin(cache, bio);
	generic_make_request(bio);
}

static void issue(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	if (!op_is_flush(bio->bi_opf)) {
		accounted_request(cache, bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in do_worker().
	 */
	spin_lock_irqsave(&cache->lock, flags);
	cache->commit_requested = true;
	bio_list_add(&cache->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell)
{
	inc_ds(cache, bio, cell);
	issue(cache, bio);
}

static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_writethrough_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void writethrough_endio(struct bio *bio)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

	dm_unhook_bio(&pb->hook_info, bio);

	if (bio->bi_error) {
		bio_endio(bio);
		return;
	}

	dm_bio_restore(&pb->bio_details, bio);
	remap_to_cache(pb->cache, bio, pb->cblock);

	/*
	 * We can't issue this bio directly, since we're in interrupt
	 * context.  So it gets put on a bio list for processing by the
	 * worker thread.
	 */
	defer_writethrough_bio(pb->cache, bio);
}

/*
 * When running in writethrough mode we need to send writes to clean blocks
 * to both the cache and origin devices.  In future we'd like to clone the
 * bio and send them in parallel, but for now we're doing them in
 * series as this is easier.
 */
static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
				       dm_oblock_t oblock, dm_cblock_t cblock)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

	pb->cache = cache;
	pb->cblock = cblock;
	dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
	dm_bio_record(&pb->bio_details, bio);

	remap_to_origin_clear_discard(pb->cache, bio, oblock);
}

/*----------------------------------------------------------------
 * Failure modes
 *--------------------------------------------------------------*/
static enum cache_metadata_mode get_cache_mode(struct cache *cache)
{
	return cache->features.mode;
}

static const char *cache_device_name(struct cache *cache)
{
	return dm_device_name(dm_table_get_md(cache->ti->table));
}

static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
{
	const char *descs[] = {
		"write",
		"read-only",
		"fail"
	};

	dm_table_event(cache->ti->table);
	DMINFO("%s: switching cache to %s mode",
	       cache_device_name(cache), descs[(int)mode]);
}

static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
{
	bool needs_check;
	enum cache_metadata_mode old_mode = get_cache_mode(cache);

	if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
		DMERR("%s: unable to read needs_check flag, setting failure mode.",
		      cache_device_name(cache));
		new_mode = CM_FAIL;
	}

	if (new_mode == CM_WRITE && needs_check) {
		DMERR("%s: unable to switch cache to write mode until repaired.",
		      cache_device_name(cache));
		if (old_mode != new_mode)
			new_mode = old_mode;
		else
			new_mode = CM_READ_ONLY;
	}

	/* Never move out of fail mode */
	if (old_mode == CM_FAIL)
		new_mode = CM_FAIL;

	switch (new_mode) {
	case CM_FAIL:
	case CM_READ_ONLY:
		dm_cache_metadata_set_read_only(cache->cmd);
		break;

	case CM_WRITE:
		dm_cache_metadata_set_read_write(cache->cmd);
		break;
	}

	cache->features.mode = new_mode;

	if (new_mode != old_mode)
		notify_mode_switch(cache, new_mode);
}

static void abort_transaction(struct cache *cache)
{
	const char *dev_name = cache_device_name(cache);

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return;

	if (dm_cache_metadata_set_needs_check(cache->cmd)) {
		DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
		set_cache_mode(cache, CM_FAIL);
	}

	DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
	if (dm_cache_metadata_abort(cache->cmd)) {
		DMERR("%s: failed to abort metadata transaction", dev_name);
		set_cache_mode(cache, CM_FAIL);
	}
}

static void metadata_operation_failed(struct cache *cache, const char *op, int r)
{
	DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
		    cache_device_name(cache), op, r);
	abort_transaction(cache);
	set_cache_mode(cache, CM_READ_ONLY);
}

/*----------------------------------------------------------------
 * Migration processing
 *
 * Migration covers moving data from the origin device to the cache, or
 * vice versa.
 *--------------------------------------------------------------*/
static void inc_io_migrations(struct cache *cache)
{
	atomic_inc(&cache->nr_io_migrations);
}

static void dec_io_migrations(struct cache *cache)
{
	atomic_dec(&cache->nr_io_migrations);
}

static bool discard_or_flush(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
}

static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
{
	if (discard_or_flush(cell->holder)) {
		/*
		 * We have to handle these bios individually.
		 */
		dm_cell_release(cache->prison, cell, &cache->deferred_bios);
		free_prison_cell(cache, cell);
	} else
		list_add_tail(&cell->user_list, &cache->deferred_cells);
}

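/*
 * If 'holder' is false we first try to promote another prisoner to be the
 * cell's holder; when there is no prisoner to promote the cell is simply
 * released and freed.  Otherwise the whole cell is deferred for the
 * worker thread.
 */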
static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, bool holder)
{
	unsigned long flags;

	if (!holder && dm_cell_promote_or_release(cache->prison, cell)) {
		/*
		 * There was no prisoner to promote to holder, the
		 * cell has been released.
		 */
		free_prison_cell(cache, cell);
		return;
	}

	spin_lock_irqsave(&cache->lock, flags);
	__cell_defer(cache, cell);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void cell_error_with_code(struct cache *cache, struct dm_bio_prison_cell *cell, int err)
{
	dm_cell_error(cache->prison, cell, err);
	free_prison_cell(cache, cell);
}

static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(cache, cell, DM_ENDIO_REQUEUE);
}

static void free_io_migration(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;

	dec_io_migrations(cache);
	free_migration(mg);
	wake_worker(cache);
}

static void migration_failure(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;
	const char *dev_name = cache_device_name(cache);

	if (mg->writeback) {
		DMERR_LIMIT("%s: writeback failed; couldn't copy block", dev_name);
		set_dirty(cache, mg->old_oblock, mg->cblock);
		cell_defer(cache, mg->old_ocell, false);

	} else if (mg->demote) {
		DMERR_LIMIT("%s: demotion failed; couldn't copy block", dev_name);
		policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);

		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
		if (mg->promote)
			cell_defer(cache, mg->new_ocell, true);
	} else {
		DMERR_LIMIT("%s: promotion failed; couldn't copy block", dev_name);
		policy_remove_mapping(cache->policy, mg->new_oblock);
		cell_defer(cache, mg->new_ocell, true);
	}

	free_io_migration(mg);
}

static void migration_success_pre_commit(struct dm_cache_migration *mg)
{
	int r;
	unsigned long flags;
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		clear_dirty(cache, mg->old_oblock, mg->cblock);
		cell_defer(cache, mg->old_ocell, false);
		free_io_migration(mg);
		return;

	} else if (mg->demote) {
		r = dm_cache_remove_mapping(cache->cmd, mg->cblock);
		if (r) {
			DMERR_LIMIT("%s: demotion failed; couldn't update on disk metadata",
				    cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
			policy_force_mapping(cache->policy, mg->new_oblock,
					     mg->old_oblock);
			if (mg->promote)
				cell_defer(cache, mg->new_ocell, true);
			free_io_migration(mg);
			return;
		}
	} else {
		r = dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock);
		if (r) {
			DMERR_LIMIT("%s: promotion failed; couldn't update on disk metadata",
				    cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_insert_mapping", r);
			policy_remove_mapping(cache->policy, mg->new_oblock);
			free_io_migration(mg);
			return;
		}
	}

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->need_commit_migrations);
	cache->commit_requested = true;
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void migration_success_post_commit(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		DMWARN_LIMIT("%s: writeback unexpectedly triggered commit",
			     cache_device_name(cache));
		return;

	} else if (mg->demote) {
		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);

		if (mg->promote) {
			mg->demote = false;

			spin_lock_irqsave(&cache->lock, flags);
			list_add_tail(&mg->list, &cache->quiesced_migrations);
			spin_unlock_irqrestore(&cache->lock, flags);

		} else {
			if (mg->invalidate)
				policy_remove_mapping(cache->policy, mg->old_oblock);
			free_io_migration(mg);
		}

	} else {
		if (mg->requeue_holder) {
			clear_dirty(cache, mg->new_oblock, mg->cblock);
			cell_defer(cache, mg->new_ocell, true);
		} else {
			/*
			 * The block was promoted via an overwrite, so it's dirty.
			 */
			set_dirty(cache, mg->new_oblock, mg->cblock);
			bio_endio(mg->new_ocell->holder);
			cell_defer(cache, mg->new_ocell, false);
		}
		free_io_migration(mg);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
	struct cache *cache = mg->cache;

	if (read_err || write_err)
		mg->err = true;

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->completed_migrations);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void issue_copy(struct dm_cache_migration *mg)
{
	int r;
	struct dm_io_region o_region, c_region;
	struct cache *cache = mg->cache;
	sector_t cblock = from_cblock(mg->cblock);

	o_region.bdev = cache->origin_dev->bdev;
	o_region.count = cache->sectors_per_block;

	c_region.bdev = cache->cache_dev->bdev;
	c_region.sector = cblock * cache->sectors_per_block;
	c_region.count = cache->sectors_per_block;

	if (mg->writeback || mg->demote) {
		/* demote */
		o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
	} else {
		/* promote */
		o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
	}

	if (r < 0) {
		DMERR_LIMIT("%s: issuing migration failed", cache_device_name(cache));
		migration_failure(mg);
	}
}

static void overwrite_endio(struct bio *bio)
{
	struct dm_cache_migration *mg = bio->bi_private;
	struct cache *cache = mg->cache;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
	unsigned long flags;

	dm_unhook_bio(&pb->hook_info, bio);

	if (bio->bi_error)
		mg->err = true;

	mg->requeue_holder = false;

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->completed_migrations);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(mg->cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
	remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);

	/*
	 * No need to inc_ds() here, since the cell will be held for the
	 * duration of the io.
	 */
	accounted_request(mg->cache, bio);
}

static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}

static void avoid_copy(struct dm_cache_migration *mg)
{
	atomic_inc(&mg->cache->stats.copies_avoided);
	migration_success_pre_commit(mg);
}

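/*
 * Rounds the start of the bio up and the end down to discard block
 * boundaries, so only discard blocks completely covered by the bio are
 * marked.  A bio spanning less than a full discard block yields an empty
 * range (*b == *e).
 */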
static void calc_discard_block_range(struct cache *cache, struct bio *bio,
				     dm_dblock_t *b, dm_dblock_t *e)
{
	sector_t sb = bio->bi_iter.bi_sector;
	sector_t se = bio_end_sector(bio);

	*b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));

	if (se - sb < cache->discard_block_size)
		*e = *b;
	else
		*e = to_dblock(block_div(se, cache->discard_block_size));
}

static void issue_discard(struct dm_cache_migration *mg)
{
	dm_dblock_t b, e;
	struct bio *bio = mg->new_ocell->holder;
	struct cache *cache = mg->cache;

	calc_discard_block_range(cache, bio, &b, &e);
	while (b != e) {
		set_discard(cache, b);
		b = to_dblock(from_dblock(b) + 1);
	}

	bio_endio(bio);
	cell_defer(cache, mg->new_ocell, false);
	free_migration(mg);
	wake_worker(cache);
}

static void issue_copy_or_discard(struct dm_cache_migration *mg)
{
	bool avoid;
	struct cache *cache = mg->cache;

	if (mg->discard) {
		issue_discard(mg);
		return;
	}

	if (mg->writeback || mg->demote)
		avoid = !is_dirty(cache, mg->cblock) ||
			is_discarded_oblock(cache, mg->old_oblock);
	else {
		struct bio *bio = mg->new_ocell->holder;

		avoid = is_discarded_oblock(cache, mg->new_oblock);

		if (writeback_mode(&cache->features) &&
		    !avoid && bio_writes_complete_block(cache, bio)) {
			issue_overwrite(mg, bio);
			return;
		}
	}

	avoid ? avoid_copy(mg) : issue_copy(mg);
}

static void complete_migration(struct dm_cache_migration *mg)
{
	if (mg->err)
		migration_failure(mg);
	else
		migration_success_pre_commit(mg);
}

static void process_migrations(struct cache *cache, struct list_head *head,
			       void (*fn)(struct dm_cache_migration *))
{
	unsigned long flags;
	struct list_head list;
	struct dm_cache_migration *mg, *tmp;

	INIT_LIST_HEAD(&list);
	spin_lock_irqsave(&cache->lock, flags);
	list_splice_init(head, &list);
	spin_unlock_irqrestore(&cache->lock, flags);

	list_for_each_entry_safe(mg, tmp, &list, list)
		fn(mg);
}

static void __queue_quiesced_migration(struct dm_cache_migration *mg)
{
	list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
}

static void queue_quiesced_migration(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	spin_lock_irqsave(&cache->lock, flags);
	__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
{
	unsigned long flags;
	struct dm_cache_migration *mg, *tmp;

	spin_lock_irqsave(&cache->lock, flags);
	list_for_each_entry_safe(mg, tmp, work, list)
		__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void check_for_quiesced_migrations(struct cache *cache,
					  struct per_bio_data *pb)
{
	struct list_head work;

	if (!pb->all_io_entry)
		return;

	INIT_LIST_HEAD(&work);
	dm_deferred_entry_dec(pb->all_io_entry, &work);

	if (!list_empty(&work))
		queue_quiesced_migrations(cache, &work);
}

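/*
 * A migration must not start copying until the in-flight IO it could
 * conflict with has drained.  If there is nothing outstanding in the
 * all_io deferred set the migration is queued immediately; otherwise it
 * is queued when the deferred set drains (see
 * check_for_quiesced_migrations() above).
 */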
static void quiesce_migration(struct dm_cache_migration *mg)
{
	if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
		queue_quiesced_migration(mg);
}

static void promote(struct cache *cache, struct prealloc *structs,
		    dm_oblock_t oblock, dm_cblock_t cblock,
		    struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->discard = false;
	mg->writeback = false;
	mg->demote = false;
	mg->promote = true;
	mg->requeue_holder = true;
	mg->invalidate = false;
	mg->cache = cache;
	mg->new_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = NULL;
	mg->new_ocell = cell;
	mg->start_jiffies = jiffies;

	inc_io_migrations(cache);
	quiesce_migration(mg);
}

static void writeback(struct cache *cache, struct prealloc *structs,
		      dm_oblock_t oblock, dm_cblock_t cblock,
		      struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->discard = false;
	mg->writeback = true;
	mg->demote = false;
	mg->promote = false;
	mg->requeue_holder = true;
	mg->invalidate = false;
	mg->cache = cache;
	mg->old_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = cell;
	mg->new_ocell = NULL;
	mg->start_jiffies = jiffies;

	inc_io_migrations(cache);
	quiesce_migration(mg);
}

static void demote_then_promote(struct cache *cache, struct prealloc *structs,
				dm_oblock_t old_oblock, dm_oblock_t new_oblock,
				dm_cblock_t cblock,
				struct dm_bio_prison_cell *old_ocell,
				struct dm_bio_prison_cell *new_ocell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->discard = false;
	mg->writeback = false;
	mg->demote = true;
	mg->promote = true;
	mg->requeue_holder = true;
	mg->invalidate = false;
	mg->cache = cache;
	mg->old_oblock = old_oblock;
	mg->new_oblock = new_oblock;
	mg->cblock = cblock;
	mg->old_ocell = old_ocell;
	mg->new_ocell = new_ocell;
	mg->start_jiffies = jiffies;

	inc_io_migrations(cache);
	quiesce_migration(mg);
}

/*
 * Invalidate a cache entry.  No writeback occurs; any changes in the cache
 * block are thrown away.
 */
static void invalidate(struct cache *cache, struct prealloc *structs,
		       dm_oblock_t oblock, dm_cblock_t cblock,
		       struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->discard = false;
	mg->writeback = false;
	mg->demote = true;
	mg->promote = false;
	mg->requeue_holder = true;
	mg->invalidate = true;
	mg->cache = cache;
	mg->old_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = cell;
	mg->new_ocell = NULL;
	mg->start_jiffies = jiffies;

	inc_io_migrations(cache);
	quiesce_migration(mg);
}
1565
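/*
 * A discard is modelled as a migration with just the discard flag set.
 * Unlike the migrations above it doesn't call inc_io_migrations():
 * nothing is copied, so it shouldn't count against the migration
 * throttle.
 */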
Joe Thornber7ae34e72014-11-06 10:18:04 +00001566static void discard(struct cache *cache, struct prealloc *structs,
1567 struct dm_bio_prison_cell *cell)
1568{
1569 struct dm_cache_migration *mg = prealloc_get_migration(structs);
1570
1571 mg->err = false;
1572 mg->discard = true;
1573 mg->writeback = false;
1574 mg->demote = false;
1575 mg->promote = false;
1576 mg->requeue_holder = false;
1577 mg->invalidate = false;
1578 mg->cache = cache;
1579 mg->old_ocell = NULL;
1580 mg->new_ocell = cell;
1581 mg->start_jiffies = jiffies;
1582
1583 quiesce_migration(mg);
1584}
1585
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001586/*----------------------------------------------------------------
1587 * bio processing
1588 *--------------------------------------------------------------*/
1589static void defer_bio(struct cache *cache, struct bio *bio)
1590{
1591 unsigned long flags;
1592
1593 spin_lock_irqsave(&cache->lock, flags);
1594 bio_list_add(&cache->deferred_bios, bio);
1595 spin_unlock_irqrestore(&cache->lock, flags);
1596
1597 wake_worker(cache);
1598}
1599
1600static void process_flush_bio(struct cache *cache, struct bio *bio)
1601{
Mike Snitzer19b00922013-04-05 15:36:34 +01001602 size_t pb_data_size = get_per_bio_data_size(cache);
1603 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001604
Kent Overstreet4f024f32013-10-11 15:44:27 -07001605 BUG_ON(bio->bi_iter.bi_size);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001606 if (!pb->req_nr)
1607 remap_to_origin(cache, bio);
1608 else
1609 remap_to_cache(cache, bio, 0);
1610
Joe Thornber8c081b52014-05-13 16:18:38 +01001611 /*
Mike Christie28a8f0d2016-06-05 14:32:25 -05001612 * REQ_PREFLUSH is not directed at any particular block so we don't
1613 * need to inc_ds(). REQ_FUA's are split into a write + REQ_PREFLUSH
Joe Thornber8c081b52014-05-13 16:18:38 +01001614 * by dm-core.
1615 */
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001616 issue(cache, bio);
1617}
1618
Joe Thornber7ae34e72014-11-06 10:18:04 +00001619static void process_discard_bio(struct cache *cache, struct prealloc *structs,
1620 struct bio *bio)
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001621{
Joe Thornber7ae34e72014-11-06 10:18:04 +00001622 int r;
1623 dm_dblock_t b, e;
1624 struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001625
Joe Thornber7ae34e72014-11-06 10:18:04 +00001626 calc_discard_block_range(cache, bio, &b, &e);
1627 if (b == e) {
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001628 bio_endio(bio);
Joe Thornber7ae34e72014-11-06 10:18:04 +00001629 return;
1630 }
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001631
Joe Thornber7ae34e72014-11-06 10:18:04 +00001632 cell_prealloc = prealloc_get_cell(structs);
1633 r = bio_detain_range(cache, dblock_to_oblock(cache, b), dblock_to_oblock(cache, e), bio, cell_prealloc,
1634 (cell_free_fn) prealloc_put_cell,
1635 structs, &new_ocell);
1636 if (r > 0)
1637 return;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001638
Joe Thornber7ae34e72014-11-06 10:18:04 +00001639 discard(cache, structs, new_ocell);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001640}
1641
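/*
 * Allow a new migration only while the estimated volume of in-flight
 * copying, (io migrations + 1) * block size, stays below
 * migration_threshold.  E.g. (illustrative numbers) with the default
 * threshold of 2048 sectors and 128 sector (64KiB) blocks, roughly 16
 * copies may be in flight at once.
 */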
1642static bool spare_migration_bandwidth(struct cache *cache)
1643{
Joe Thornbera59db672015-01-23 10:16:16 +00001644 sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001645 cache->sectors_per_block;
1646 return current_volume < cache->migration_threshold;
1647}
1648
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001649static void inc_hit_counter(struct cache *cache, struct bio *bio)
1650{
1651 atomic_inc(bio_data_dir(bio) == READ ?
1652 &cache->stats.read_hit : &cache->stats.write_hit);
1653}
1654
1655static void inc_miss_counter(struct cache *cache, struct bio *bio)
1656{
1657 atomic_inc(bio_data_dir(bio) == READ ?
1658 &cache->stats.read_miss : &cache->stats.write_miss);
1659}
1660
Joe Thornberfb4100a2015-05-20 10:30:32 +01001661/*----------------------------------------------------------------*/
1662
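/*
 * Context for visiting a cell with dm_cell_visit_release(): bios that
 * can be remapped and issued now are collected on bios_for_issue;
 * discards and flushes need individual handling, so they are put back
 * on the deferred list via unhandled_bios.
 */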
Joe Thornber651f5fa2015-05-15 15:26:08 +01001663struct inc_detail {
1664 struct cache *cache;
1665 struct bio_list bios_for_issue;
1666 struct bio_list unhandled_bios;
1667 bool any_writes;
1668};
1669
1670static void inc_fn(void *context, struct dm_bio_prison_cell *cell)
1671{
1672 struct bio *bio;
1673 struct inc_detail *detail = context;
1674 struct cache *cache = detail->cache;
1675
1676 inc_ds(cache, cell->holder, cell);
1677 if (bio_data_dir(cell->holder) == WRITE)
1678 detail->any_writes = true;
1679
1680 while ((bio = bio_list_pop(&cell->bios))) {
1681 if (discard_or_flush(bio)) {
1682 bio_list_add(&detail->unhandled_bios, bio);
1683 continue;
1684 }
1685
1686 if (bio_data_dir(bio) == WRITE)
1687 detail->any_writes = true;
1688
1689 bio_list_add(&detail->bios_for_issue, bio);
1690 inc_ds(cache, bio, cell);
1691 }
1692}
1693
1694/* FIXME: refactor these two */
1695static void remap_cell_to_origin_clear_discard(struct cache *cache,
1696 struct dm_bio_prison_cell *cell,
1697 dm_oblock_t oblock, bool issue_holder)
1698{
1699 struct bio *bio;
1700 unsigned long flags;
1701 struct inc_detail detail;
1702
1703 detail.cache = cache;
1704 bio_list_init(&detail.bios_for_issue);
1705 bio_list_init(&detail.unhandled_bios);
1706 detail.any_writes = false;
1707
1708 spin_lock_irqsave(&cache->lock, flags);
1709 dm_cell_visit_release(cache->prison, inc_fn, &detail, cell);
1710 bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios);
1711 spin_unlock_irqrestore(&cache->lock, flags);
1712
1713 remap_to_origin(cache, cell->holder);
1714 if (issue_holder)
1715 issue(cache, cell->holder);
1716 else
1717 accounted_begin(cache, cell->holder);
1718
1719 if (detail.any_writes)
1720 clear_discard(cache, oblock_to_dblock(cache, oblock));
1721
1722 while ((bio = bio_list_pop(&detail.bios_for_issue))) {
1723 remap_to_origin(cache, bio);
1724 issue(cache, bio);
1725 }
Joe Thornber9153df72015-08-31 18:20:08 +01001726
1727 free_prison_cell(cache, cell);
Joe Thornber651f5fa2015-05-15 15:26:08 +01001728}
1729
1730static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_cell *cell,
1731 dm_oblock_t oblock, dm_cblock_t cblock, bool issue_holder)
1732{
1733 struct bio *bio;
1734 unsigned long flags;
1735 struct inc_detail detail;
1736
1737 detail.cache = cache;
1738 bio_list_init(&detail.bios_for_issue);
1739 bio_list_init(&detail.unhandled_bios);
1740 detail.any_writes = false;
1741
1742 spin_lock_irqsave(&cache->lock, flags);
1743 dm_cell_visit_release(cache->prison, inc_fn, &detail, cell);
1744 bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios);
1745 spin_unlock_irqrestore(&cache->lock, flags);
1746
1747 remap_to_cache(cache, cell->holder, cblock);
1748 if (issue_holder)
1749 issue(cache, cell->holder);
1750 else
1751 accounted_begin(cache, cell->holder);
1752
1753 if (detail.any_writes) {
1754 set_dirty(cache, oblock, cblock);
1755 clear_discard(cache, oblock_to_dblock(cache, oblock));
1756 }
1757
1758 while ((bio = bio_list_pop(&detail.bios_for_issue))) {
1759 remap_to_cache(cache, bio, cblock);
1760 issue(cache, bio);
1761 }
Joe Thornber9153df72015-08-31 18:20:08 +01001762
1763 free_prison_cell(cache, cell);
Joe Thornber651f5fa2015-05-15 15:26:08 +01001764}
1765
1766/*----------------------------------------------------------------*/
1767
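/*
 * When the policy wants to replace an entry it asks us, via the locker
 * callback, to detain the old oblock.  cell_locker grabs a cell from
 * the preallocated structs; the resulting cell is later handed to
 * demote_then_promote().
 */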
Joe Thornberfb4100a2015-05-20 10:30:32 +01001768struct old_oblock_lock {
1769 struct policy_locker locker;
1770 struct cache *cache;
1771 struct prealloc *structs;
1772 struct dm_bio_prison_cell *cell;
1773};
1774
1775static int null_locker(struct policy_locker *locker, dm_oblock_t b)
1776{
1777 /* This should never be called */
1778 BUG();
1779 return 0;
1780}
1781
1782static int cell_locker(struct policy_locker *locker, dm_oblock_t b)
1783{
1784 struct old_oblock_lock *l = container_of(locker, struct old_oblock_lock, locker);
1785 struct dm_bio_prison_cell *cell_prealloc = prealloc_get_cell(l->structs);
1786
1787 return bio_detain(l->cache, b, NULL, cell_prealloc,
1788 (cell_free_fn) prealloc_put_cell,
1789 l->structs, &l->cell);
1790}
1791
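/*
 * Run the cell's holder through the policy and act on the result:
 * remap and issue hits and misses, or kick off the promotion, demotion
 * or invalidation the policy asks for.
 */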
Joe Thornber651f5fa2015-05-15 15:26:08 +01001792static void process_cell(struct cache *cache, struct prealloc *structs,
1793 struct dm_bio_prison_cell *new_ocell)
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001794{
1795 int r;
1796 bool release_cell = true;
Joe Thornber651f5fa2015-05-15 15:26:08 +01001797 struct bio *bio = new_ocell->holder;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001798 dm_oblock_t block = get_bio_block(cache, bio);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001799 struct policy_result lookup_result;
Joe Thornber2ee57d52013-10-24 14:10:29 -04001800 bool passthrough = passthrough_mode(&cache->features);
Joe Thornber40775252015-05-15 15:29:58 +01001801 bool fast_promotion, can_migrate;
Joe Thornberfb4100a2015-05-20 10:30:32 +01001802 struct old_oblock_lock ool;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001803
Joe Thornber40775252015-05-15 15:29:58 +01001804 fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio);
1805 can_migrate = !passthrough && (fast_promotion || spare_migration_bandwidth(cache));
Joe Thornber43c32bf2014-11-25 13:14:57 +00001806
Joe Thornberfb4100a2015-05-20 10:30:32 +01001807 ool.locker.fn = cell_locker;
1808 ool.cache = cache;
1809 ool.structs = structs;
1810 ool.cell = NULL;
Joe Thornber40775252015-05-15 15:29:58 +01001811 r = policy_map(cache->policy, block, true, can_migrate, fast_promotion,
Joe Thornberfb4100a2015-05-20 10:30:32 +01001812 bio, &ool.locker, &lookup_result);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001813
1814 if (r == -EWOULDBLOCK)
1815 /* migration has been denied */
1816 lookup_result.op = POLICY_MISS;
1817
1818 switch (lookup_result.op) {
1819 case POLICY_HIT:
Joe Thornber2ee57d52013-10-24 14:10:29 -04001820 if (passthrough) {
1821 inc_miss_counter(cache, bio);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001822
Joe Thornber2ee57d52013-10-24 14:10:29 -04001823 /*
1824 * Passthrough always maps to the origin,
1825 * invalidating any cache blocks that are written
1826 * to.
1827 */
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001828
Joe Thornber2ee57d52013-10-24 14:10:29 -04001829 if (bio_data_dir(bio) == WRITE) {
1830 atomic_inc(&cache->stats.demotion);
1831 invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
1832 release_cell = false;
1833
1834 } else {
1835 /* FIXME: factor out issue_origin() */
Joe Thornber2ee57d52013-10-24 14:10:29 -04001836 remap_to_origin_clear_discard(cache, bio, block);
Joe Thornber8c081b52014-05-13 16:18:38 +01001837 inc_and_issue(cache, bio, new_ocell);
Joe Thornber2ee57d52013-10-24 14:10:29 -04001838 }
1839 } else {
1840 inc_hit_counter(cache, bio);
1841
1842 if (bio_data_dir(bio) == WRITE &&
1843 writethrough_mode(&cache->features) &&
1844 !is_dirty(cache, lookup_result.cblock)) {
Joe Thornber2ee57d52013-10-24 14:10:29 -04001845 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
Joe Thornber8c081b52014-05-13 16:18:38 +01001846 inc_and_issue(cache, bio, new_ocell);
1847
Joe Thornber651f5fa2015-05-15 15:26:08 +01001848 } else {
1849 remap_cell_to_cache_dirty(cache, new_ocell, block, lookup_result.cblock, true);
1850 release_cell = false;
Joe Thornber8c081b52014-05-13 16:18:38 +01001851 }
Joe Thornber2ee57d52013-10-24 14:10:29 -04001852 }
1853
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001854 break;
1855
1856 case POLICY_MISS:
1857 inc_miss_counter(cache, bio);
Joe Thornber651f5fa2015-05-15 15:26:08 +01001858 remap_cell_to_origin_clear_discard(cache, new_ocell, block, true);
1859 release_cell = false;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001860 break;
1861
1862 case POLICY_NEW:
1863 atomic_inc(&cache->stats.promotion);
1864 promote(cache, structs, block, lookup_result.cblock, new_ocell);
1865 release_cell = false;
1866 break;
1867
1868 case POLICY_REPLACE:
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001869 atomic_inc(&cache->stats.demotion);
1870 atomic_inc(&cache->stats.promotion);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001871 demote_then_promote(cache, structs, lookup_result.old_oblock,
1872 block, lookup_result.cblock,
Joe Thornberfb4100a2015-05-20 10:30:32 +01001873 ool.cell, new_ocell);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001874 release_cell = false;
1875 break;
1876
1877 default:
Mike Snitzerb61d9502015-04-22 17:25:56 -04001878 DMERR_LIMIT("%s: %s: erroring bio, unknown policy op: %u",
1879 cache_device_name(cache), __func__,
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001880 (unsigned) lookup_result.op);
1881 bio_io_error(bio);
1882 }
1883
1884 if (release_cell)
1885 cell_defer(cache, new_ocell, false);
1886}
1887
Joe Thornber651f5fa2015-05-15 15:26:08 +01001888static void process_bio(struct cache *cache, struct prealloc *structs,
1889 struct bio *bio)
1890{
1891 int r;
1892 dm_oblock_t block = get_bio_block(cache, bio);
1893 struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
1894
1895 /*
1896 * Check to see if that block is currently migrating.
1897 */
1898 cell_prealloc = prealloc_get_cell(structs);
1899 r = bio_detain(cache, block, bio, cell_prealloc,
1900 (cell_free_fn) prealloc_put_cell,
1901 structs, &new_ocell);
1902 if (r > 0)
1903 return;
1904
1905 process_cell(cache, structs, new_ocell);
1906}
1907
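/*
 * The first comparison catches jiffies wrap-around (commit
 * immediately); otherwise commit once COMMIT_PERIOD has passed since
 * the last commit.
 */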
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001908static int need_commit_due_to_time(struct cache *cache)
1909{
Joe Thornber651f5fa2015-05-15 15:26:08 +01001910 return jiffies < cache->last_commit_jiffies ||
1911 jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001912}
1913
Joe Thornber028ae9f2015-04-22 16:42:35 -04001914/*
1915 * A non-zero return indicates read_only or fail_io mode.
1916 */
1917static int commit(struct cache *cache, bool clean_shutdown)
1918{
1919 int r;
1920
1921 if (get_cache_mode(cache) >= CM_READ_ONLY)
1922 return -EINVAL;
1923
1924 atomic_inc(&cache->stats.commit_count);
1925 r = dm_cache_commit(cache->cmd, clean_shutdown);
1926 if (r)
1927 metadata_operation_failed(cache, "dm_cache_commit", r);
1928
1929 return r;
1930}
1931
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001932static int commit_if_needed(struct cache *cache)
1933{
Heinz Mauelshagenffcbcb62013-10-14 17:24:43 +02001934 int r = 0;
1935
1936 if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
1937 dm_cache_changed_this_transaction(cache->cmd)) {
Joe Thornber028ae9f2015-04-22 16:42:35 -04001938 r = commit(cache, false);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001939 cache->commit_requested = false;
Heinz Mauelshagenffcbcb62013-10-14 17:24:43 +02001940 cache->last_commit_jiffies = jiffies;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001941 }
1942
Heinz Mauelshagenffcbcb62013-10-14 17:24:43 +02001943 return r;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001944}
1945
1946static void process_deferred_bios(struct cache *cache)
1947{
Mike Snitzer665022d2015-07-16 21:48:55 -04001948 bool prealloc_used = false;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001949 unsigned long flags;
1950 struct bio_list bios;
1951 struct bio *bio;
1952 struct prealloc structs;
1953
1954 memset(&structs, 0, sizeof(structs));
1955 bio_list_init(&bios);
1956
1957 spin_lock_irqsave(&cache->lock, flags);
1958 bio_list_merge(&bios, &cache->deferred_bios);
1959 bio_list_init(&cache->deferred_bios);
1960 spin_unlock_irqrestore(&cache->lock, flags);
1961
1962 while (!bio_list_empty(&bios)) {
1963 /*
1964 * If we've got no free migration structs, and processing
1965 * this bio might require one, we pause until there are some
1966 * prepared mappings to process.
1967 */
Mike Snitzer795e6332015-07-29 13:48:23 -04001968 prealloc_used = true;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001969 if (prealloc_data_structs(cache, &structs)) {
1970 spin_lock_irqsave(&cache->lock, flags);
1971 bio_list_merge(&cache->deferred_bios, &bios);
1972 spin_unlock_irqrestore(&cache->lock, flags);
1973 break;
1974 }
1975
1976 bio = bio_list_pop(&bios);
1977
Jens Axboe1eff9d32016-08-05 15:35:16 -06001978 if (bio->bi_opf & REQ_PREFLUSH)
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001979 process_flush_bio(cache, bio);
Mike Christiee6047142016-06-05 14:32:04 -05001980 else if (bio_op(bio) == REQ_OP_DISCARD)
Joe Thornber7ae34e72014-11-06 10:18:04 +00001981 process_discard_bio(cache, &structs, bio);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001982 else
1983 process_bio(cache, &structs, bio);
1984 }
1985
Mike Snitzer665022d2015-07-16 21:48:55 -04001986 if (prealloc_used)
1987 prealloc_free_structs(cache, &structs);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00001988}
1989
Joe Thornber651f5fa2015-05-15 15:26:08 +01001990static void process_deferred_cells(struct cache *cache)
1991{
Mike Snitzer665022d2015-07-16 21:48:55 -04001992 bool prealloc_used = false;
Joe Thornber651f5fa2015-05-15 15:26:08 +01001993 unsigned long flags;
1994 struct dm_bio_prison_cell *cell, *tmp;
1995 struct list_head cells;
1996 struct prealloc structs;
1997
1998 memset(&structs, 0, sizeof(structs));
1999
2000 INIT_LIST_HEAD(&cells);
2001
2002 spin_lock_irqsave(&cache->lock, flags);
2003 list_splice_init(&cache->deferred_cells, &cells);
2004 spin_unlock_irqrestore(&cache->lock, flags);
2005
2006 list_for_each_entry_safe(cell, tmp, &cells, user_list) {
2007 /*
2008 * If we've got no free migration structs, and processing
2009 * this cell might require one, we pause until there are some
2010 * prepared mappings to process.
2011 */
Mike Snitzer795e6332015-07-29 13:48:23 -04002012 prealloc_used = true;
Joe Thornber651f5fa2015-05-15 15:26:08 +01002013 if (prealloc_data_structs(cache, &structs)) {
2014 spin_lock_irqsave(&cache->lock, flags);
2015 list_splice(&cells, &cache->deferred_cells);
2016 spin_unlock_irqrestore(&cache->lock, flags);
2017 break;
2018 }
2019
2020 process_cell(cache, &structs, cell);
2021 }
2022
Mike Snitzer665022d2015-07-16 21:48:55 -04002023 if (prealloc_used)
2024 prealloc_free_structs(cache, &structs);
Joe Thornber651f5fa2015-05-15 15:26:08 +01002025}
2026
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002027static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
2028{
2029 unsigned long flags;
2030 struct bio_list bios;
2031 struct bio *bio;
2032
2033 bio_list_init(&bios);
2034
2035 spin_lock_irqsave(&cache->lock, flags);
2036 bio_list_merge(&bios, &cache->deferred_flush_bios);
2037 bio_list_init(&cache->deferred_flush_bios);
2038 spin_unlock_irqrestore(&cache->lock, flags);
2039
Joe Thornber8c081b52014-05-13 16:18:38 +01002040 /*
2041 * These bios have already been through inc_ds()
2042 */
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002043 while ((bio = bio_list_pop(&bios)))
Joe Thornber066dbaa32015-05-15 15:18:01 +01002044 submit_bios ? accounted_request(cache, bio) : bio_io_error(bio);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002045}
2046
Joe Thornbere2e74d62013-03-20 17:21:27 +00002047static void process_deferred_writethrough_bios(struct cache *cache)
2048{
2049 unsigned long flags;
2050 struct bio_list bios;
2051 struct bio *bio;
2052
2053 bio_list_init(&bios);
2054
2055 spin_lock_irqsave(&cache->lock, flags);
2056 bio_list_merge(&bios, &cache->deferred_writethrough_bios);
2057 bio_list_init(&cache->deferred_writethrough_bios);
2058 spin_unlock_irqrestore(&cache->lock, flags);
2059
Joe Thornber8c081b52014-05-13 16:18:38 +01002060 /*
2061 * These bios have already been through inc_ds()
2062 */
Joe Thornbere2e74d62013-03-20 17:21:27 +00002063 while ((bio = bio_list_pop(&bios)))
Joe Thornber066dbaa32015-05-15 15:18:01 +01002064 accounted_request(cache, bio);
Joe Thornbere2e74d62013-03-20 17:21:27 +00002065}
2066
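/*
 * Write dirty blocks back while there is spare migration bandwidth.
 * The 'busy' flag tells the policy whether the origin has seen io
 * within the last second (HZ), letting it decide how much writeback
 * work to hand out.
 */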
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002067static void writeback_some_dirty_blocks(struct cache *cache)
2068{
Mike Snitzer665022d2015-07-16 21:48:55 -04002069 bool prealloc_used = false;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002070 dm_oblock_t oblock;
2071 dm_cblock_t cblock;
2072 struct prealloc structs;
2073 struct dm_bio_prison_cell *old_ocell;
Joe Thornber20f68142015-05-15 15:20:09 +01002074 bool busy = !iot_idle_for(&cache->origin_tracker, HZ);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002075
2076 memset(&structs, 0, sizeof(structs));
2077
2078 while (spare_migration_bandwidth(cache)) {
Mike Snitzere782eff2015-07-16 21:26:10 -04002079 if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
2080 break; /* no work to do */
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002081
Mike Snitzer795e6332015-07-29 13:48:23 -04002082 prealloc_used = true;
Mike Snitzere782eff2015-07-16 21:26:10 -04002083 if (prealloc_data_structs(cache, &structs) ||
2084 get_cell(cache, oblock, &structs, &old_ocell)) {
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002085 policy_set_dirty(cache->policy, oblock);
2086 break;
2087 }
2088
2089 writeback(cache, &structs, oblock, cblock, old_ocell);
2090 }
2091
Mike Snitzer665022d2015-07-16 21:48:55 -04002092 if (prealloc_used)
2093 prealloc_free_structs(cache, &structs);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002094}
2095
2096/*----------------------------------------------------------------
Joe Thornber65790ff2013-11-08 16:39:50 +00002097 * Invalidations.
2098 * Dropping something from the cache *without* writing back.
2099 *--------------------------------------------------------------*/
2100
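/*
 * Walk the half-open cblock range [begin, end), removing each mapping
 * from both the policy and the metadata.  -ENODATA just means the
 * block was already unmapped and is not treated as an error.
 */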
2101static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)
2102{
2103 int r = 0;
2104 uint64_t begin = from_cblock(req->cblocks->begin);
2105 uint64_t end = from_cblock(req->cblocks->end);
2106
2107 while (begin != end) {
2108 r = policy_remove_cblock(cache->policy, to_cblock(begin));
2109 if (!r) {
2110 r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
Joe Thornber028ae9f2015-04-22 16:42:35 -04002111 if (r) {
2112 metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
Joe Thornber65790ff2013-11-08 16:39:50 +00002113 break;
Joe Thornber028ae9f2015-04-22 16:42:35 -04002114 }
Joe Thornber65790ff2013-11-08 16:39:50 +00002115
2116 } else if (r == -ENODATA) {
2117 /* harmless, already unmapped */
2118 r = 0;
2119
2120 } else {
Mike Snitzerb61d9502015-04-22 17:25:56 -04002121 DMERR("%s: policy_remove_cblock failed", cache_device_name(cache));
Joe Thornber65790ff2013-11-08 16:39:50 +00002122 break;
2123 }
2124
2125 begin++;
2126 }
2127
2128 cache->commit_requested = true;
2129
2130 req->err = r;
2131 atomic_set(&req->complete, 1);
2132
2133 wake_up(&req->result_wait);
2134}
2135
2136static void process_invalidation_requests(struct cache *cache)
2137{
2138 struct list_head list;
2139 struct invalidation_request *req, *tmp;
2140
2141 INIT_LIST_HEAD(&list);
2142 spin_lock(&cache->invalidation_lock);
2143 list_splice_init(&cache->invalidation_requests, &list);
2144 spin_unlock(&cache->invalidation_lock);
2145
2146 list_for_each_entry_safe (req, tmp, &list, list)
2147 process_invalidation_request(cache, req);
2148}
2149
2150/*----------------------------------------------------------------
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002151 * Main worker loop
2152 *--------------------------------------------------------------*/
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002153static bool is_quiescing(struct cache *cache)
2154{
Joe Thornber238f8362013-10-30 17:29:30 +00002155 return atomic_read(&cache->quiescing);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002156}
2157
Joe Thornber66cb1912013-10-30 17:11:58 +00002158static void ack_quiescing(struct cache *cache)
2159{
2160 if (is_quiescing(cache)) {
2161 atomic_inc(&cache->quiescing_ack);
2162 wake_up(&cache->quiescing_wait);
2163 }
2164}
2165
2166static void wait_for_quiescing_ack(struct cache *cache)
2167{
2168 wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
2169}
2170
2171static void start_quiescing(struct cache *cache)
2172{
Joe Thornber238f8362013-10-30 17:29:30 +00002173 atomic_inc(&cache->quiescing);
Joe Thornber66cb1912013-10-30 17:11:58 +00002174 wait_for_quiescing_ack(cache);
2175}
2176
2177static void stop_quiescing(struct cache *cache)
2178{
Joe Thornber238f8362013-10-30 17:29:30 +00002179 atomic_set(&cache->quiescing, 0);
Joe Thornber66cb1912013-10-30 17:11:58 +00002180 atomic_set(&cache->quiescing_ack, 0);
2181}
2182
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002183static void wait_for_migrations(struct cache *cache)
2184{
Joe Thornbera59db672015-01-23 10:16:16 +00002185 wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002186}
2187
2188static void stop_worker(struct cache *cache)
2189{
2190 cancel_delayed_work(&cache->waker);
2191 flush_workqueue(cache->wq);
2192}
2193
Joe Thornber651f5fa2015-05-15 15:26:08 +01002194static void requeue_deferred_cells(struct cache *cache)
2195{
2196 unsigned long flags;
2197 struct list_head cells;
2198 struct dm_bio_prison_cell *cell, *tmp;
2199
2200 INIT_LIST_HEAD(&cells);
2201 spin_lock_irqsave(&cache->lock, flags);
2202 list_splice_init(&cache->deferred_cells, &cells);
2203 spin_unlock_irqrestore(&cache->lock, flags);
2204
2205 list_for_each_entry_safe(cell, tmp, &cells, user_list)
2206 cell_requeue(cache, cell);
2207}
2208
2209static void requeue_deferred_bios(struct cache *cache)
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002210{
2211 struct bio *bio;
2212 struct bio_list bios;
2213
2214 bio_list_init(&bios);
2215 bio_list_merge(&bios, &cache->deferred_bios);
2216 bio_list_init(&cache->deferred_bios);
2217
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002218 while ((bio = bio_list_pop(&bios))) {
2219 bio->bi_error = DM_ENDIO_REQUEUE;
2220 bio_endio(bio);
2221 }
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002222}
2223
2224static int more_work(struct cache *cache)
2225{
2226 if (is_quiescing(cache))
2227 return !list_empty(&cache->quiesced_migrations) ||
2228 !list_empty(&cache->completed_migrations) ||
2229 !list_empty(&cache->need_commit_migrations);
2230 else
2231 return !bio_list_empty(&cache->deferred_bios) ||
Joe Thornber651f5fa2015-05-15 15:26:08 +01002232 !list_empty(&cache->deferred_cells) ||
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002233 !bio_list_empty(&cache->deferred_flush_bios) ||
Joe Thornbere2e74d62013-03-20 17:21:27 +00002234 !bio_list_empty(&cache->deferred_writethrough_bios) ||
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002235 !list_empty(&cache->quiesced_migrations) ||
2236 !list_empty(&cache->completed_migrations) ||
Joe Thornber65790ff2013-11-08 16:39:50 +00002237 !list_empty(&cache->need_commit_migrations) ||
2238 cache->invalidate;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002239}
2240
2241static void do_worker(struct work_struct *ws)
2242{
2243 struct cache *cache = container_of(ws, struct cache, worker);
2244
2245 do {
Joe Thornber66cb1912013-10-30 17:11:58 +00002246 if (!is_quiescing(cache)) {
2247 writeback_some_dirty_blocks(cache);
2248 process_deferred_writethrough_bios(cache);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002249 process_deferred_bios(cache);
Joe Thornber651f5fa2015-05-15 15:26:08 +01002250 process_deferred_cells(cache);
Joe Thornber65790ff2013-11-08 16:39:50 +00002251 process_invalidation_requests(cache);
Joe Thornber66cb1912013-10-30 17:11:58 +00002252 }
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002253
Joe Thornber7ae34e72014-11-06 10:18:04 +00002254 process_migrations(cache, &cache->quiesced_migrations, issue_copy_or_discard);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002255 process_migrations(cache, &cache->completed_migrations, complete_migration);
2256
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002257 if (commit_if_needed(cache)) {
2258 process_deferred_flush_bios(cache, false);
Joe Thornber304affa2014-06-24 15:36:58 -04002259 process_migrations(cache, &cache->need_commit_migrations, migration_failure);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002260 } else {
2261 process_deferred_flush_bios(cache, true);
2262 process_migrations(cache, &cache->need_commit_migrations,
2263 migration_success_post_commit);
2264 }
Joe Thornber66cb1912013-10-30 17:11:58 +00002265
2266 ack_quiescing(cache);
2267
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002268 } while (more_work(cache));
2269}
2270
2271/*
2272 * We want to commit periodically so that not too much
2273 * unwritten metadata builds up.
2274 */
2275static void do_waker(struct work_struct *ws)
2276{
2277 struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
Joe Thornberfba10102015-05-29 10:20:56 +01002278 policy_tick(cache->policy, true);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002279 wake_worker(cache);
2280 queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
2281}
2282
2283/*----------------------------------------------------------------*/
2284
2285static int is_congested(struct dm_dev *dev, int bdi_bits)
2286{
2287 struct request_queue *q = bdev_get_queue(dev->bdev);
Jan Karadc3b17c2017-02-02 15:56:50 +01002288 return bdi_congested(q->backing_dev_info, bdi_bits);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002289}
2290
2291static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
2292{
2293 struct cache *cache = container_of(cb, struct cache, callbacks);
2294
2295 return is_congested(cache->origin_dev, bdi_bits) ||
2296 is_congested(cache->cache_dev, bdi_bits);
2297}
2298
2299/*----------------------------------------------------------------
2300 * Target methods
2301 *--------------------------------------------------------------*/
2302
2303/*
2304 * This function gets called on the error paths of the constructor, so we
2305 * have to cope with a partially initialised struct.
2306 */
2307static void destroy(struct cache *cache)
2308{
2309 unsigned i;
2310
Julia Lawall6f659852015-09-13 14:15:05 +02002311 mempool_destroy(cache->migration_pool);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002312
2313 if (cache->all_io_ds)
2314 dm_deferred_set_destroy(cache->all_io_ds);
2315
2316 if (cache->prison)
2317 dm_bio_prison_destroy(cache->prison);
2318
2319 if (cache->wq)
2320 destroy_workqueue(cache->wq);
2321
2322 if (cache->dirty_bitset)
2323 free_bitset(cache->dirty_bitset);
2324
2325 if (cache->discard_bitset)
2326 free_bitset(cache->discard_bitset);
2327
2328 if (cache->copier)
2329 dm_kcopyd_client_destroy(cache->copier);
2330
2331 if (cache->cmd)
2332 dm_cache_metadata_close(cache->cmd);
2333
2334 if (cache->metadata_dev)
2335 dm_put_device(cache->ti, cache->metadata_dev);
2336
2337 if (cache->origin_dev)
2338 dm_put_device(cache->ti, cache->origin_dev);
2339
2340 if (cache->cache_dev)
2341 dm_put_device(cache->ti, cache->cache_dev);
2342
2343 if (cache->policy)
2344 dm_cache_policy_destroy(cache->policy);
2345
2346 for (i = 0; i < cache->nr_ctr_args ; i++)
2347 kfree(cache->ctr_args[i]);
2348 kfree(cache->ctr_args);
2349
2350 kfree(cache);
2351}
2352
2353static void cache_dtr(struct dm_target *ti)
2354{
2355 struct cache *cache = ti->private;
2356
2357 destroy(cache);
2358}
2359
2360static sector_t get_dev_size(struct dm_dev *dev)
2361{
2362 return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
2363}
2364
2365/*----------------------------------------------------------------*/
2366
2367/*
2368 * Construct a cache device mapping.
2369 *
2370 * cache <metadata dev> <cache dev> <origin dev> <block size>
2371 * <#feature args> [<feature arg>]*
2372 * <policy> <#policy args> [<policy arg>]*
2373 *
2374 * metadata dev : fast device holding the persistent metadata
2375 * cache dev : fast device holding cached data blocks
2376 * origin dev : slow device holding original data blocks
2377 * block size : cache unit size in sectors
2378 *
2379 * #feature args : number of feature arguments passed
2380 * feature args : writethrough. (The default is writeback.)
2381 *
2382 * policy : the replacement policy to use
2383 * #policy args : an even number of policy arguments corresponding
2384 * to key/value pairs passed to the policy
2385 * policy args : key/value pairs passed to the policy
2386 * E.g. 'sequential_threshold 1024'
2387 * See cache-policies.txt for details.
2388 *
2389 * Optional feature arguments are:
2390 * writethrough : write through caching that prohibits cache block
2391 * content from being different from origin block content.
2392 * Without this argument, the default behaviour is to write
2393 * back cache block contents later for performance reasons,
2394 * so they may differ from the corresponding origin blocks.
2395 */
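/*
 * Purely illustrative example (all device names and sizes made up): a
 * table line for a 20GiB target using 512 sector (256KiB) blocks,
 * explicit writeback mode and the default policy with no arguments
 * might read:
 *
 *   0 41943040 cache /dev/mapper/fast-meta /dev/mapper/fast-data \
 *       /dev/mapper/slow 512 1 writeback default 0
 */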
2396struct cache_args {
2397 struct dm_target *ti;
2398
2399 struct dm_dev *metadata_dev;
2400
2401 struct dm_dev *cache_dev;
2402 sector_t cache_sectors;
2403
2404 struct dm_dev *origin_dev;
2405 sector_t origin_sectors;
2406
2407 uint32_t block_size;
2408
2409 const char *policy_name;
2410 int policy_argc;
2411 const char **policy_argv;
2412
2413 struct cache_features features;
2414};
2415
2416static void destroy_cache_args(struct cache_args *ca)
2417{
2418 if (ca->metadata_dev)
2419 dm_put_device(ca->ti, ca->metadata_dev);
2420
2421 if (ca->cache_dev)
2422 dm_put_device(ca->ti, ca->cache_dev);
2423
2424 if (ca->origin_dev)
2425 dm_put_device(ca->ti, ca->origin_dev);
2426
2427 kfree(ca);
2428}
2429
2430static bool at_least_one_arg(struct dm_arg_set *as, char **error)
2431{
2432 if (!as->argc) {
2433 *error = "Insufficient args";
2434 return false;
2435 }
2436
2437 return true;
2438}
2439
2440static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
2441 char **error)
2442{
2443 int r;
2444 sector_t metadata_dev_size;
2445 char b[BDEVNAME_SIZE];
2446
2447 if (!at_least_one_arg(as, error))
2448 return -EINVAL;
2449
2450 r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2451 &ca->metadata_dev);
2452 if (r) {
2453 *error = "Error opening metadata device";
2454 return r;
2455 }
2456
2457 metadata_dev_size = get_dev_size(ca->metadata_dev);
2458 if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
2459 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2460 bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS_WARNING);
2461
2462 return 0;
2463}
2464
2465static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
2466 char **error)
2467{
2468 int r;
2469
2470 if (!at_least_one_arg(as, error))
2471 return -EINVAL;
2472
2473 r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2474 &ca->cache_dev);
2475 if (r) {
2476 *error = "Error opening cache device";
2477 return r;
2478 }
2479 ca->cache_sectors = get_dev_size(ca->cache_dev);
2480
2481 return 0;
2482}
2483
2484static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
2485 char **error)
2486{
2487 int r;
2488
2489 if (!at_least_one_arg(as, error))
2490 return -EINVAL;
2491
2492 r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2493 &ca->origin_dev);
2494 if (r) {
2495 *error = "Error opening origin device";
2496 return r;
2497 }
2498
2499 ca->origin_sectors = get_dev_size(ca->origin_dev);
2500 if (ca->ti->len > ca->origin_sectors) {
2501 *error = "Target length exceeds size of the origin device";
2502 return -EINVAL;
2503 }
2504
2505 return 0;
2506}
2507
2508static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
2509 char **error)
2510{
Mike Snitzer05473042013-08-16 10:54:19 -04002511 unsigned long block_size;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002512
2513 if (!at_least_one_arg(as, error))
2514 return -EINVAL;
2515
Mike Snitzer05473042013-08-16 10:54:19 -04002516 if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
2517 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2518 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
2519 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002520 *error = "Invalid data block size";
2521 return -EINVAL;
2522 }
2523
Mike Snitzer05473042013-08-16 10:54:19 -04002524 if (block_size > ca->cache_sectors) {
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002525 *error = "Data block size is larger than the cache device";
2526 return -EINVAL;
2527 }
2528
Mike Snitzer05473042013-08-16 10:54:19 -04002529 ca->block_size = block_size;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002530
2531 return 0;
2532}
2533
2534static void init_features(struct cache_features *cf)
2535{
2536 cf->mode = CM_WRITE;
Joe Thornber2ee57d52013-10-24 14:10:29 -04002537 cf->io_mode = CM_IO_WRITEBACK;
Joe Thornber629d0a82016-09-22 06:15:21 -04002538 cf->metadata_version = 1;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002539}
2540
2541static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
2542 char **error)
2543{
2544 static struct dm_arg _args[] = {
Joe Thornber629d0a82016-09-22 06:15:21 -04002545 {0, 2, "Invalid number of cache feature arguments"},
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002546 };
2547
2548 int r;
2549 unsigned argc;
2550 const char *arg;
2551 struct cache_features *cf = &ca->features;
2552
2553 init_features(cf);
2554
2555 r = dm_read_arg_group(_args, as, &argc, error);
2556 if (r)
2557 return -EINVAL;
2558
2559 while (argc--) {
2560 arg = dm_shift_arg(as);
2561
2562 if (!strcasecmp(arg, "writeback"))
Joe Thornber2ee57d52013-10-24 14:10:29 -04002563 cf->io_mode = CM_IO_WRITEBACK;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002564
2565 else if (!strcasecmp(arg, "writethrough"))
Joe Thornber2ee57d52013-10-24 14:10:29 -04002566 cf->io_mode = CM_IO_WRITETHROUGH;
2567
2568 else if (!strcasecmp(arg, "passthrough"))
2569 cf->io_mode = CM_IO_PASSTHROUGH;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002570
Joe Thornber629d0a82016-09-22 06:15:21 -04002571 else if (!strcasecmp(arg, "metadata2"))
2572 cf->metadata_version = 2;
2573
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002574 else {
2575 *error = "Unrecognised cache feature requested";
2576 return -EINVAL;
2577 }
2578 }
2579
2580 return 0;
2581}
2582
2583static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
2584 char **error)
2585{
2586 static struct dm_arg _args[] = {
2587 {0, 1024, "Invalid number of policy arguments"},
2588 };
2589
2590 int r;
2591
2592 if (!at_least_one_arg(as, error))
2593 return -EINVAL;
2594
2595 ca->policy_name = dm_shift_arg(as);
2596
2597 r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
2598 if (r)
2599 return -EINVAL;
2600
2601 ca->policy_argv = (const char **)as->argv;
2602 dm_consume_args(as, ca->policy_argc);
2603
2604 return 0;
2605}
2606
2607static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
2608 char **error)
2609{
2610 int r;
2611 struct dm_arg_set as;
2612
2613 as.argc = argc;
2614 as.argv = argv;
2615
2616 r = parse_metadata_dev(ca, &as, error);
2617 if (r)
2618 return r;
2619
2620 r = parse_cache_dev(ca, &as, error);
2621 if (r)
2622 return r;
2623
2624 r = parse_origin_dev(ca, &as, error);
2625 if (r)
2626 return r;
2627
2628 r = parse_block_size(ca, &as, error);
2629 if (r)
2630 return r;
2631
2632 r = parse_features(ca, &as, error);
2633 if (r)
2634 return r;
2635
2636 r = parse_policy(ca, &as, error);
2637 if (r)
2638 return r;
2639
2640 return 0;
2641}
2642
2643/*----------------------------------------------------------------*/
2644
2645static struct kmem_cache *migration_cache;
2646
Alasdair G Kergon2c73c472013-05-10 14:37:21 +01002647#define NOT_CORE_OPTION 1
2648
Joe Thornber2f14f4b2013-05-10 14:37:21 +01002649static int process_config_option(struct cache *cache, const char *key, const char *value)
Alasdair G Kergon2c73c472013-05-10 14:37:21 +01002650{
2651 unsigned long tmp;
2652
Joe Thornber2f14f4b2013-05-10 14:37:21 +01002653 if (!strcasecmp(key, "migration_threshold")) {
2654 if (kstrtoul(value, 10, &tmp))
Alasdair G Kergon2c73c472013-05-10 14:37:21 +01002655 return -EINVAL;
2656
2657 cache->migration_threshold = tmp;
2658 return 0;
2659 }
2660
2661 return NOT_CORE_OPTION;
2662}
2663
Joe Thornber2f14f4b2013-05-10 14:37:21 +01002664static int set_config_value(struct cache *cache, const char *key, const char *value)
2665{
2666 int r = process_config_option(cache, key, value);
2667
2668 if (r == NOT_CORE_OPTION)
2669 r = policy_set_config_value(cache->policy, key, value);
2670
2671 if (r)
2672 DMWARN("bad config value for %s: %s", key, value);
2673
2674 return r;
2675}
2676
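/*
 * Configuration comes in as <key> <value> pairs.  The only core key at
 * this point is "migration_threshold"; anything else is passed through
 * to the policy, e.g. "sequential_threshold 1024".
 */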
2677static int set_config_values(struct cache *cache, int argc, const char **argv)
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002678{
2679 int r = 0;
2680
2681 if (argc & 1) {
2682 DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
2683 return -EINVAL;
2684 }
2685
2686 while (argc) {
Joe Thornber2f14f4b2013-05-10 14:37:21 +01002687 r = set_config_value(cache, argv[0], argv[1]);
2688 if (r)
2689 break;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002690
2691 argc -= 2;
2692 argv += 2;
2693 }
2694
2695 return r;
2696}
2697
2698static int create_cache_policy(struct cache *cache, struct cache_args *ca,
2699 char **error)
2700{
Mikulas Patocka4cb3e1d2013-10-01 18:35:39 -04002701 struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
2702 cache->cache_size,
2703 cache->origin_sectors,
2704 cache->sectors_per_block);
2705 if (IS_ERR(p)) {
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002706 *error = "Error creating cache's policy";
Mikulas Patocka4cb3e1d2013-10-01 18:35:39 -04002707 return PTR_ERR(p);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002708 }
Mikulas Patocka4cb3e1d2013-10-01 18:35:39 -04002709 cache->policy = p;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002710
Joe Thornber2f14f4b2013-05-10 14:37:21 +01002711 return 0;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002712}
2713
Joe Thornber08b18452014-11-06 14:38:01 +00002714/*
Joe Thornber2bb812d2014-11-26 16:07:50 +00002715 * We want the discard block size to be at least the size of the cache
2716 * block size and have no more than 2^14 discard blocks across the origin.
Joe Thornber08b18452014-11-06 14:38:01 +00002717 */
2718#define MAX_DISCARD_BLOCKS (1 << 14)
2719
2720static bool too_many_discard_blocks(sector_t discard_block_size,
2721 sector_t origin_size)
2722{
2723 (void) sector_div(origin_size, discard_block_size);
2724
2725 return origin_size > MAX_DISCARD_BLOCKS;
2726}
2727
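/*
 * For example (illustrative numbers): with 128 sector (64KiB) cache
 * blocks on a 1TiB origin (2^31 sectors), the size doubles from 2^7 up
 * to 2^17 sectors (64MiB), at which point the origin spans 2^14
 * discard blocks.
 */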
2728static sector_t calculate_discard_block_size(sector_t cache_block_size,
2729 sector_t origin_size)
2730{
Joe Thornber2bb812d2014-11-26 16:07:50 +00002731 sector_t discard_block_size = cache_block_size;
Joe Thornber08b18452014-11-06 14:38:01 +00002732
2733 if (origin_size)
2734 while (too_many_discard_blocks(discard_block_size, origin_size))
2735 discard_block_size *= 2;
2736
2737 return discard_block_size;
2738}
2739
Joe Thornberd1d92202014-11-11 11:58:32 +00002740static void set_cache_size(struct cache *cache, dm_cblock_t size)
2741{
2742 dm_block_t nr_blocks = from_cblock(size);
2743
2744 if (nr_blocks > (1 << 20) && cache->cache_size != size)
2745 DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n"
2746 "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n"
2747 "Please consider increasing the cache block size to reduce the overall cache block count.",
2748 (unsigned long long) nr_blocks);
2749
2750 cache->cache_size = size;
2751}
2752
Joe Thornberf8350da2013-05-10 14:37:16 +01002753#define DEFAULT_MIGRATION_THRESHOLD 2048
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002754
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002755static int cache_create(struct cache_args *ca, struct cache **result)
2756{
2757 int r = 0;
2758 char **error = &ca->ti->error;
2759 struct cache *cache;
2760 struct dm_target *ti = ca->ti;
2761 dm_block_t origin_blocks;
2762 struct dm_cache_metadata *cmd;
2763 bool may_format = ca->features.mode == CM_WRITE;
2764
2765 cache = kzalloc(sizeof(*cache), GFP_KERNEL);
2766 if (!cache)
2767 return -ENOMEM;
2768
2769 cache->ti = ca->ti;
2770 ti->private = cache;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002771 ti->num_flush_bios = 2;
2772 ti->flush_supported = true;
2773
2774 ti->num_discard_bios = 1;
2775 ti->discards_supported = true;
2776 ti->discard_zeroes_data_unsupported = true;
Joe Thornber25726292014-11-24 14:05:16 +00002777 ti->split_discard_bios = false;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002778
Joe Thornber8c5008f2013-05-10 14:37:18 +01002779 cache->features = ca->features;
Mike Snitzer30187e12016-01-31 13:28:26 -05002780 ti->per_io_data_size = get_per_bio_data_size(cache);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002781
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002782 cache->callbacks.congested_fn = cache_is_congested;
2783 dm_table_add_target_callbacks(ti->table, &cache->callbacks);
2784
2785 cache->metadata_dev = ca->metadata_dev;
2786 cache->origin_dev = ca->origin_dev;
2787 cache->cache_dev = ca->cache_dev;
2788
2789 ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
2790
2791 /* FIXME: factor out this whole section */
2792 origin_blocks = cache->origin_sectors = ca->origin_sectors;
Joe Thornber414dd672013-03-20 17:21:25 +00002793 origin_blocks = block_div(origin_blocks, ca->block_size);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002794 cache->origin_blocks = to_oblock(origin_blocks);
2795
2796 cache->sectors_per_block = ca->block_size;
2797 if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
2798 r = -EINVAL;
2799 goto bad;
2800 }
2801
2802 if (ca->block_size & (ca->block_size - 1)) {
2803 dm_block_t cache_size = ca->cache_sectors;
2804
2805 cache->sectors_per_block_shift = -1;
Joe Thornber414dd672013-03-20 17:21:25 +00002806 cache_size = block_div(cache_size, ca->block_size);
Joe Thornberd1d92202014-11-11 11:58:32 +00002807 set_cache_size(cache, to_cblock(cache_size));
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002808 } else {
2809 cache->sectors_per_block_shift = __ffs(ca->block_size);
Joe Thornberd1d92202014-11-11 11:58:32 +00002810 set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002811 }
2812
2813 r = create_cache_policy(cache, ca, error);
2814 if (r)
2815 goto bad;
Joe Thornber2f14f4b2013-05-10 14:37:21 +01002816
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002817 cache->policy_nr_args = ca->policy_argc;
Joe Thornber2f14f4b2013-05-10 14:37:21 +01002818 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
2819
2820 r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
2821 if (r) {
2822 *error = "Error setting cache policy's config values";
2823 goto bad;
2824 }
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002825
2826 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
2827 ca->block_size, may_format,
Joe Thornber629d0a82016-09-22 06:15:21 -04002828 dm_cache_policy_get_hint_size(cache->policy),
2829 ca->features.metadata_version);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002830 if (IS_ERR(cmd)) {
2831 *error = "Error creating metadata object";
2832 r = PTR_ERR(cmd);
2833 goto bad;
2834 }
2835 cache->cmd = cmd;
Joe Thornber028ae9f2015-04-22 16:42:35 -04002836 set_cache_mode(cache, CM_WRITE);
2837 if (get_cache_mode(cache) != CM_WRITE) {
2838 *error = "Unable to get write access to metadata, please check/repair metadata.";
2839 r = -EINVAL;
2840 goto bad;
2841 }
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002842
Joe Thornber2ee57d52013-10-24 14:10:29 -04002843 if (passthrough_mode(&cache->features)) {
2844 bool all_clean;
2845
2846 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
2847 if (r) {
2848 *error = "dm_cache_metadata_all_clean() failed";
2849 goto bad;
2850 }
2851
2852 if (!all_clean) {
2853 *error = "Cannot enter passthrough mode unless all blocks are clean";
2854 r = -EINVAL;
2855 goto bad;
2856 }
2857 }
2858
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002859 spin_lock_init(&cache->lock);
Joe Thornber651f5fa2015-05-15 15:26:08 +01002860 INIT_LIST_HEAD(&cache->deferred_cells);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002861 bio_list_init(&cache->deferred_bios);
2862 bio_list_init(&cache->deferred_flush_bios);
Joe Thornbere2e74d62013-03-20 17:21:27 +00002863 bio_list_init(&cache->deferred_writethrough_bios);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002864 INIT_LIST_HEAD(&cache->quiesced_migrations);
2865 INIT_LIST_HEAD(&cache->completed_migrations);
2866 INIT_LIST_HEAD(&cache->need_commit_migrations);
Joe Thornbera59db672015-01-23 10:16:16 +00002867 atomic_set(&cache->nr_allocated_migrations, 0);
2868 atomic_set(&cache->nr_io_migrations, 0);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002869 init_waitqueue_head(&cache->migration_wait);
2870
Joe Thornber66cb1912013-10-30 17:11:58 +00002871 init_waitqueue_head(&cache->quiescing_wait);
Joe Thornber238f8362013-10-30 17:29:30 +00002872 atomic_set(&cache->quiescing, 0);
Joe Thornber66cb1912013-10-30 17:11:58 +00002873 atomic_set(&cache->quiescing_ack, 0);
2874
Wei Yongjunfa4d6832013-05-10 14:37:14 +01002875 r = -ENOMEM;
Anssi Hannula44fa8162014-08-01 11:55:47 -04002876 atomic_set(&cache->nr_dirty, 0);
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002877 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2878 if (!cache->dirty_bitset) {
2879 *error = "could not allocate dirty bitset";
2880 goto bad;
2881 }
2882 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2883
Joe Thornber08b18452014-11-06 14:38:01 +00002884 cache->discard_block_size =
2885 calculate_discard_block_size(cache->sectors_per_block,
2886 cache->origin_sectors);
Joe Thornber25726292014-11-24 14:05:16 +00002887 cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
2888 cache->discard_block_size));
Joe Thornber1bad9bc2014-11-07 14:47:07 +00002889 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002890 if (!cache->discard_bitset) {
2891 *error = "could not allocate discard bitset";
2892 goto bad;
2893 }
Joe Thornber1bad9bc2014-11-07 14:47:07 +00002894 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002895
2896 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2897 if (IS_ERR(cache->copier)) {
2898 *error = "could not create kcopyd client";
2899 r = PTR_ERR(cache->copier);
2900 goto bad;
2901 }
2902
2903 cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2904 if (!cache->wq) {
2905 *error = "could not create workqueue for metadata object";
2906 goto bad;
2907 }
2908 INIT_WORK(&cache->worker, do_worker);
2909 INIT_DELAYED_WORK(&cache->waker, do_waker);
2910 cache->last_commit_jiffies = jiffies;
2911
Joe Thornbera195db22014-10-06 16:30:06 -04002912 cache->prison = dm_bio_prison_create();
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002913 if (!cache->prison) {
2914 *error = "could not create bio prison";
2915 goto bad;
2916 }
2917
2918 cache->all_io_ds = dm_deferred_set_create();
2919 if (!cache->all_io_ds) {
2920 *error = "could not create all_io deferred set";
2921 goto bad;
2922 }
2923
2924 cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
2925 migration_cache);
2926 if (!cache->migration_pool) {
2927 *error = "Error creating cache's migration mempool";
2928 goto bad;
2929 }
2930
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002931 cache->need_tick_bio = true;
2932 cache->sized = false;
Joe Thornber65790ff2013-11-08 16:39:50 +00002933 cache->invalidate = false;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002934 cache->commit_requested = false;
2935 cache->loaded_mappings = false;
2936 cache->loaded_discards = false;
2937
2938 load_stats(cache);
2939
2940 atomic_set(&cache->stats.demotion, 0);
2941 atomic_set(&cache->stats.promotion, 0);
2942 atomic_set(&cache->stats.copies_avoided, 0);
2943 atomic_set(&cache->stats.cache_cell_clash, 0);
2944 atomic_set(&cache->stats.commit_count, 0);
2945 atomic_set(&cache->stats.discard_count, 0);
2946
Joe Thornber65790ff2013-11-08 16:39:50 +00002947 spin_lock_init(&cache->invalidation_lock);
2948 INIT_LIST_HEAD(&cache->invalidation_requests);
2949
Joe Thornber066dbaa32015-05-15 15:18:01 +01002950 iot_init(&cache->origin_tracker);
2951
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00002952 *result = cache;
2953 return 0;
2954
2955bad:
2956 destroy(cache);
2957 return r;
2958}
2959
2960static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
2961{
2962 unsigned i;
2963 const char **copy;
2964
2965 copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
2966 if (!copy)
2967 return -ENOMEM;
2968 for (i = 0; i < argc; i++) {
2969 copy[i] = kstrdup(argv[i], GFP_KERNEL);
2970 if (!copy[i]) {
2971 while (i--)
2972 kfree(copy[i]);
2973 kfree(copy);
2974 return -ENOMEM;
2975 }
2976 }
2977
2978 cache->nr_ctr_args = argc;
2979 cache->ctr_args = copy;
2980
2981 return 0;
2982}
2983
2984static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2985{
2986 int r = -EINVAL;
2987 struct cache_args *ca;
2988 struct cache *cache = NULL;
2989
2990 ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2991 if (!ca) {
2992 ti->error = "Error allocating memory for cache";
2993 return -ENOMEM;
2994 }
2995 ca->ti = ti;
2996
2997 r = parse_cache_args(ca, argc, argv, &ti->error);
2998 if (r)
2999 goto out;
3000
3001 r = cache_create(ca, &cache);
Heinz Mauelshagen617a0b82013-03-20 17:21:26 +00003002 if (r)
3003 goto out;
Joe Thornberc6b4fcb2013-03-01 22:45:51 +00003004
3005 r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
3006 if (r) {
3007 destroy(cache);
3008 goto out;
3009 }
3010
3011 ti->private = cache;
3012
3013out:
3014 destroy_cache_args(ca);
3015 return r;
3016}
3017
Joe Thornber651f5fa2015-05-15 15:26:08 +01003018/*----------------------------------------------------------------*/
3019
static int cache_map(struct dm_target *ti, struct bio *bio)
{
	struct cache *cache = ti->private;

	int r;
	struct dm_bio_prison_cell *cell = NULL;
	dm_oblock_t block = get_bio_block(cache, bio);
	size_t pb_data_size = get_per_bio_data_size(cache);
	bool can_migrate = false;
	bool fast_promotion;
	struct policy_result lookup_result;
	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
	struct old_oblock_lock ool;

	ool.locker.fn = null_locker;

	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
		/*
		 * This can only occur if the io goes to a partial block at
		 * the end of the origin device.  We don't cache these.
		 * Just remap to the origin and carry on.
		 */
		remap_to_origin(cache, bio);
		accounted_begin(cache, bio);
		return DM_MAPIO_REMAPPED;
	}

	if (discard_or_flush(bio)) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * Check to see if that block is currently migrating.
	 */
	cell = alloc_prison_cell(cache);
	if (!cell) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = bio_detain(cache, block, bio, cell,
		       (cell_free_fn) free_prison_cell,
		       cache, &cell);
	if (r) {
		if (r < 0)
			defer_bio(cache, bio);

		return DM_MAPIO_SUBMITTED;
	}

	fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio);

	r = policy_map(cache->policy, block, false, can_migrate, fast_promotion,
		       bio, &ool.locker, &lookup_result);
	if (r == -EWOULDBLOCK) {
		cell_defer(cache, cell, true);
		return DM_MAPIO_SUBMITTED;

	} else if (r) {
		DMERR_LIMIT("%s: Unexpected return from cache replacement policy: %d",
			    cache_device_name(cache), r);
		cell_defer(cache, cell, false);
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = DM_MAPIO_REMAPPED;
	switch (lookup_result.op) {
	case POLICY_HIT:
		if (passthrough_mode(&cache->features)) {
			if (bio_data_dir(bio) == WRITE) {
				/*
				 * We need to invalidate this block, so
				 * defer for the worker thread.
				 */
				cell_defer(cache, cell, true);
				r = DM_MAPIO_SUBMITTED;

			} else {
				inc_miss_counter(cache, bio);
				remap_to_origin_clear_discard(cache, bio, block);
				accounted_begin(cache, bio);
				inc_ds(cache, bio, cell);
				/*
				 * FIXME: we want to remap hits or misses
				 * straight away rather than passing over to
				 * the worker.
				 */
				cell_defer(cache, cell, false);
			}

		} else {
			inc_hit_counter(cache, bio);
			if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
			    !is_dirty(cache, lookup_result.cblock)) {
				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
				accounted_begin(cache, bio);
				inc_ds(cache, bio, cell);
				cell_defer(cache, cell, false);

			} else
				remap_cell_to_cache_dirty(cache, cell, block, lookup_result.cblock, false);
		}
		break;

	case POLICY_MISS:
		inc_miss_counter(cache, bio);
		if (pb->req_nr != 0) {
			/*
			 * This is a duplicate writethrough io that is no
			 * longer needed because the block has been demoted.
			 */
			bio_endio(bio);
			/* FIXME: remap everything as a miss */
			cell_defer(cache, cell, false);
			r = DM_MAPIO_SUBMITTED;

		} else
			remap_cell_to_origin_clear_discard(cache, cell, block, false);
		break;

	default:
		DMERR_LIMIT("%s: %s: erroring bio: unknown policy op: %u",
			    cache_device_name(cache), __func__,
			    (unsigned) lookup_result.op);
		cell_defer(cache, cell, false);
		bio_io_error(bio);
		r = DM_MAPIO_SUBMITTED;
	}

	return r;
}

static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct cache *cache = ti->private;
	unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	if (pb->tick) {
		policy_tick(cache->policy, false);

		spin_lock_irqsave(&cache->lock, flags);
		cache->need_tick_bio = true;
		spin_unlock_irqrestore(&cache->lock, flags);
	}

	check_for_quiesced_migrations(cache, pb);
	accounted_complete(cache, bio);

	return 0;
}

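/*
 * The next few functions flush the in-core state (dirty bits, discards
 * and policy hints) back to the metadata device.  They are called from
 * sync_metadata() at postsuspend time.
 */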
static int write_dirty_bitset(struct cache *cache)
{
	int r;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return -EINVAL;

	r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset);
	if (r)
		metadata_operation_failed(cache, "dm_cache_set_dirty_bits", r);

	return r;
}

static int write_discard_bitset(struct cache *cache)
{
	int r;
	unsigned i;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return -EINVAL;

	r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
					   cache->discard_nr_blocks);
	if (r) {
		DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache));
		metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r);
		return r;
	}

	for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
		r = dm_cache_set_discard(cache->cmd, to_dblock(i),
					 is_discarded(cache, to_dblock(i)));
		if (r) {
			metadata_operation_failed(cache, "dm_cache_set_discard", r);
			return r;
		}
	}

	return 0;
}

static int write_hints(struct cache *cache)
{
	int r;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return -EINVAL;

	r = dm_cache_write_hints(cache->cmd, cache->policy);
	if (r) {
		metadata_operation_failed(cache, "dm_cache_write_hints", r);
		return r;
	}

	return 0;
}

/*
 * Returns true on success.
 */
static bool sync_metadata(struct cache *cache)
{
	int r1, r2, r3, r4;

	r1 = write_dirty_bitset(cache);
	if (r1)
		DMERR("%s: could not write dirty bitset", cache_device_name(cache));

	r2 = write_discard_bitset(cache);
	if (r2)
		DMERR("%s: could not write discard bitset", cache_device_name(cache));

	save_stats(cache);

	r3 = write_hints(cache);
	if (r3)
		DMERR("%s: could not write hints", cache_device_name(cache));

	/*
	 * If writing the above metadata failed, we still commit, but don't
	 * set the clean shutdown flag.  This will effectively force every
	 * dirty bit to be set on reload.
	 */
	r4 = commit(cache, !r1 && !r2 && !r3);
	if (r4)
		DMERR("%s: could not write cache metadata", cache_device_name(cache));

	return !r1 && !r2 && !r3 && !r4;
}

static void cache_postsuspend(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	start_quiescing(cache);
	wait_for_migrations(cache);
	stop_worker(cache);
	requeue_deferred_bios(cache);
	requeue_deferred_cells(cache);
	stop_quiescing(cache);

	if (get_cache_mode(cache) == CM_WRITE)
		(void) sync_metadata(cache);
}

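/*
 * Callback for dm_cache_load_mappings(); re-populates the policy and
 * the in-core dirty bitset from the metadata device.
 */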
static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
			bool dirty, uint32_t hint, bool hint_valid)
{
	int r;
	struct cache *cache = context;

	r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
	if (r)
		return r;

	if (dirty)
		set_dirty(cache, oblock, cblock);
	else
		clear_dirty(cache, oblock, cblock);

	return 0;
}

/*
 * The discard block size in the on disk metadata is not
 * necessarily the same as we're currently using.  So we have to
 * be careful to only set the discarded attribute if we know it
 * covers a complete block of the new size.
 */
struct discard_load_info {
	struct cache *cache;

	/*
	 * These blocks are sized using the on disk dblock size, rather
	 * than the current one.
	 */
	dm_block_t block_size;
	dm_block_t discard_begin, discard_end;
};

static void discard_load_info_init(struct cache *cache,
				   struct discard_load_info *li)
{
	li->cache = cache;
	li->discard_begin = li->discard_end = 0;
}

static void set_discard_range(struct discard_load_info *li)
{
	sector_t b, e;

	if (li->discard_begin == li->discard_end)
		return;

	/*
	 * Convert to sectors.
	 */
	b = li->discard_begin * li->block_size;
	e = li->discard_end * li->block_size;

	/*
	 * Then convert back to the current dblock size.
	 */
	b = dm_sector_div_up(b, li->cache->discard_block_size);
	sector_div(e, li->cache->discard_block_size);

	/*
	 * The origin may have shrunk, so we need to check we're still in
	 * bounds.
	 */
	if (e > from_dblock(li->cache->discard_nr_blocks))
		e = from_dblock(li->cache->discard_nr_blocks);

	for (; b < e; b++)
		set_discard(li->cache, to_dblock(b));
}
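
/*
 * A worked example of the conversion above, with hypothetical sizes:
 * given an on-disk block_size of 128 sectors and a current
 * discard_block_size of 512 sectors, a loaded range of dblocks [3, 9)
 * covers sectors [384, 1152).  Rounding inwards gives current dblocks
 * [1, 2), so only dblock 1 (sectors 512-1023) is fully covered and
 * gets marked discarded; the partially covered blocks either side are
 * conservatively left alone.
 */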

static int load_discard(void *context, sector_t discard_block_size,
			dm_dblock_t dblock, bool discard)
{
	struct discard_load_info *li = context;

	li->block_size = discard_block_size;

	if (discard) {
		if (from_dblock(dblock) == li->discard_end)
			/*
			 * We're already in a discard range, just extend it.
			 */
			li->discard_end = li->discard_end + 1ULL;

		else {
			/*
			 * Emit the old range and start a new one.
			 */
			set_discard_range(li);
			li->discard_begin = from_dblock(dblock);
			li->discard_end = li->discard_begin + 1ULL;
		}
	} else {
		set_discard_range(li);
		li->discard_begin = li->discard_end = 0;
	}

	return 0;
}

static dm_cblock_t get_cache_dev_size(struct cache *cache)
{
	sector_t size = get_dev_size(cache->cache_dev);
	(void) sector_div(size, cache->sectors_per_block);
	return to_cblock(size);
}

static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
	if (from_cblock(new_size) > from_cblock(cache->cache_size))
		return true;

	/*
	 * We can't drop a dirty block when shrinking the cache.  Check
	 * every block that would be discarded, i.e. [new_size, cache_size).
	 */
	while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
		if (is_dirty(cache, new_size)) {
			DMERR("%s: unable to shrink cache; cache block %llu is dirty",
			      cache_device_name(cache),
			      (unsigned long long) from_cblock(new_size));
			return false;
		}
		new_size = to_cblock(from_cblock(new_size) + 1);
	}

	return true;
}

static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
{
	int r;

	r = dm_cache_resize(cache->cmd, new_size);
	if (r) {
		DMERR("%s: could not resize cache metadata", cache_device_name(cache));
		metadata_operation_failed(cache, "dm_cache_resize", r);
		return r;
	}

	set_cache_size(cache, new_size);

	return 0;
}

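/*
 * Runs before the device is resumed: handles any cache-device resize,
 * and loads the mappings and discards from the metadata device on the
 * first resume.
 */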
static int cache_preresume(struct dm_target *ti)
{
	int r = 0;
	struct cache *cache = ti->private;
	dm_cblock_t csize = get_cache_dev_size(cache);

	/*
	 * Check to see if the cache has resized.
	 */
	if (!cache->sized) {
		r = resize_cache_dev(cache, csize);
		if (r)
			return r;

		cache->sized = true;

	} else if (csize != cache->cache_size) {
		if (!can_resize(cache, csize))
			return -EINVAL;

		r = resize_cache_dev(cache, csize);
		if (r)
			return r;
	}

	if (!cache->loaded_mappings) {
		r = dm_cache_load_mappings(cache->cmd, cache->policy,
					   load_mapping, cache);
		if (r) {
			DMERR("%s: could not load cache mappings", cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_load_mappings", r);
			return r;
		}

		cache->loaded_mappings = true;
	}

	if (!cache->loaded_discards) {
		struct discard_load_info li;

		/*
		 * The discard bitset could have been resized, or the
		 * discard block size changed.  To be safe we start by
		 * setting every dblock to not discarded.
		 */
		clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

		discard_load_info_init(cache, &li);
		r = dm_cache_load_discards(cache->cmd, load_discard, &li);
		if (r) {
			DMERR("%s: could not load origin discards", cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_load_discards", r);
			return r;
		}
		set_discard_range(&li);

		cache->loaded_discards = true;
	}

	return r;
}

static void cache_resume(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	cache->need_tick_bio = true;
	do_waker(&cache->waker.work);
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <cache block size> <#used cache blocks>/<#total cache blocks>
 * <#read hits> <#read misses> <#write hits> <#write misses>
 * <#demotions> <#promotions> <#dirty>
 * <#features> <features>*
 * <#core args> <core args>
 * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
 */
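/*
 * A hypothetical STATUSTYPE_INFO line, purely to illustrate the field
 * order above (all numbers made up):
 *
 *   8 72/1024 128 130/4096 543 2345 12 34 5 9 3 1 writeback 2 migration_threshold 2048 smq 0 rw -
 */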
static void cache_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	int r = 0;
	unsigned i;
	ssize_t sz = 0;
	dm_block_t nr_free_blocks_metadata = 0;
	dm_block_t nr_blocks_metadata = 0;
	char buf[BDEVNAME_SIZE];
	struct cache *cache = ti->private;
	dm_cblock_t residency;
	bool needs_check;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_cache_mode(cache) == CM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit(cache, false);

		r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata);
		if (r) {
			DMERR("%s: dm_cache_get_free_metadata_block_count returned %d",
			      cache_device_name(cache), r);
			goto err;
		}

		r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
		if (r) {
			DMERR("%s: dm_cache_get_metadata_dev_size returned %d",
			      cache_device_name(cache), r);
			goto err;
		}

		residency = policy_residency(cache->policy);

		DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
		       (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       (unsigned long long)cache->sectors_per_block,
		       (unsigned long long) from_cblock(residency),
		       (unsigned long long) from_cblock(cache->cache_size),
		       (unsigned) atomic_read(&cache->stats.read_hit),
		       (unsigned) atomic_read(&cache->stats.read_miss),
		       (unsigned) atomic_read(&cache->stats.write_hit),
		       (unsigned) atomic_read(&cache->stats.write_miss),
		       (unsigned) atomic_read(&cache->stats.demotion),
		       (unsigned) atomic_read(&cache->stats.promotion),
		       (unsigned long) atomic_read(&cache->nr_dirty));

		if (cache->features.metadata_version == 2)
			DMEMIT("2 metadata2 ");
		else
			DMEMIT("1 ");

		if (writethrough_mode(&cache->features))
			DMEMIT("writethrough ");

		else if (passthrough_mode(&cache->features))
			DMEMIT("passthrough ");

		else if (writeback_mode(&cache->features))
			DMEMIT("writeback ");

		else {
			DMERR("%s: internal error: unknown io mode: %d",
			      cache_device_name(cache), (int) cache->features.io_mode);
			goto err;
		}

		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);

		DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
		if (sz < maxlen) {
			r = policy_emit_config_values(cache->policy, result, maxlen, &sz);
			if (r)
				DMERR("%s: policy_emit_config_values returned %d",
				      cache_device_name(cache), r);
		}

		if (get_cache_mode(cache) == CM_READ_ONLY)
			DMEMIT("ro ");
		else
			DMEMIT("rw ");

		r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);

		if (r || needs_check)
			DMEMIT("needs_check ");
		else
			DMEMIT("- ");

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		for (i = 0; i < cache->nr_ctr_args - 1; i++)
			DMEMIT(" %s", cache->ctr_args[i]);
		if (cache->nr_ctr_args)
			DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
	}

	return;

err:
	DMEMIT("Error");
}

/*
 * A cache block range can take two forms:
 *
 * i) A single cblock, eg. '3456'
 * ii) A begin and end cblock with a dash between, eg. 123-234
 */
static int parse_cblock_range(struct cache *cache, const char *str,
			      struct cblock_range *result)
{
	char dummy;
	uint64_t b, e;
	int r;

	/*
	 * Try and parse form (ii) first.
	 */
	r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
	if (r < 0)
		return r;

	if (r == 2) {
		result->begin = to_cblock(b);
		result->end = to_cblock(e);
		return 0;
	}

	/*
	 * That didn't work, try form (i).
	 */
	r = sscanf(str, "%llu%c", &b, &dummy);
	if (r < 0)
		return r;

	if (r == 1) {
		result->begin = to_cblock(b);
		result->end = to_cblock(from_cblock(result->begin) + 1u);
		return 0;
	}

	DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str);
	return -EINVAL;
}

static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
{
	uint64_t b = from_cblock(range->begin);
	uint64_t e = from_cblock(range->end);
	uint64_t n = from_cblock(cache->cache_size);

	if (b >= n) {
		DMERR("%s: begin cblock out of range: %llu >= %llu",
		      cache_device_name(cache), b, n);
		return -EINVAL;
	}

	if (e > n) {
		DMERR("%s: end cblock out of range: %llu > %llu",
		      cache_device_name(cache), e, n);
		return -EINVAL;
	}

	if (b >= e) {
		DMERR("%s: invalid cblock range: %llu >= %llu",
		      cache_device_name(cache), b, e);
		return -EINVAL;
	}

	return 0;
}

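/*
 * Queue the request on cache->invalidation_requests and block until
 * the worker thread has completed it.
 */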
static int request_invalidation(struct cache *cache, struct cblock_range *range)
{
	struct invalidation_request req;

	INIT_LIST_HEAD(&req.list);
	req.cblocks = range;
	atomic_set(&req.complete, 0);
	req.err = 0;
	init_waitqueue_head(&req.result_wait);

	spin_lock(&cache->invalidation_lock);
	list_add(&req.list, &cache->invalidation_requests);
	spin_unlock(&cache->invalidation_lock);
	wake_worker(cache);

	wait_event(req.result_wait, atomic_read(&req.complete));
	return req.err;
}

static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
					      const char **cblock_ranges)
{
	int r = 0;
	unsigned i;
	struct cblock_range range;

	if (!passthrough_mode(&cache->features)) {
		DMERR("%s: cache has to be in passthrough mode for invalidation",
		      cache_device_name(cache));
		return -EPERM;
	}

	for (i = 0; i < count; i++) {
		r = parse_cblock_range(cache, cblock_ranges[i], &range);
		if (r)
			break;

		r = validate_cblock_range(cache, &range);
		if (r)
			break;

		/*
		 * Pass the begin and end cache blocks to the worker and
		 * wake it.
		 */
		r = request_invalidation(cache, &range);
		if (r)
			break;
	}

	return r;
}

/*
 * Supports
 * "<key> <value>"
 * and
 * "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
 *
 * The key migration_threshold is supported by the cache target core.
 */
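/*
 * For example (hypothetical dm device name):
 *   dmsetup message my-cache 0 invalidate_cblocks 2345 3400-4500
 */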
static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct cache *cache = ti->private;

	if (!argc)
		return -EINVAL;

	if (get_cache_mode(cache) >= CM_READ_ONLY) {
		DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode",
		      cache_device_name(cache));
		return -EOPNOTSUPP;
	}

	if (!strcasecmp(argv[0], "invalidate_cblocks"))
		return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);

	if (argc != 2)
		return -EINVAL;

	return set_config_value(cache, argv[0], argv[1]);
}

static int cache_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int r = 0;
	struct cache *cache = ti->private;

	r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
	if (!r)
		r = fn(ti, cache->origin_dev, 0, ti->len, data);

	return r;
}

static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
	/*
	 * FIXME: these limits may be incompatible with the cache device
	 */
	limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
					    cache->origin_sectors);
	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
}

static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct cache *cache = ti->private;
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with the
	 * cache's blocksize (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < cache->sectors_per_block ||
	    do_div(io_opt_sectors, cache->sectors_per_block)) {
		blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
	}
	set_discard_limits(cache, limits);
}

/*----------------------------------------------------------------*/

static struct target_type cache_target = {
	.name = "cache",
	.version = {1, 10, 0},
	.module = THIS_MODULE,
	.ctr = cache_ctr,
	.dtr = cache_dtr,
	.map = cache_map,
	.end_io = cache_end_io,
	.postsuspend = cache_postsuspend,
	.preresume = cache_preresume,
	.resume = cache_resume,
	.status = cache_status,
	.message = cache_message,
	.iterate_devices = cache_iterate_devices,
	.io_hints = cache_io_hints,
};

static int __init dm_cache_init(void)
{
	int r;

	r = dm_register_target(&cache_target);
	if (r) {
		DMERR("cache target registration failed: %d", r);
		return r;
	}

	migration_cache = KMEM_CACHE(dm_cache_migration, 0);
	if (!migration_cache) {
		dm_unregister_target(&cache_target);
		return -ENOMEM;
	}

	return 0;
}

static void __exit dm_cache_exit(void)
{
	dm_unregister_target(&cache_target);
	kmem_cache_destroy(migration_cache);
}

module_init(dm_cache_init);
module_exit(dm_cache_exit);

MODULE_DESCRIPTION(DM_NAME " cache target");
MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
MODULE_LICENSE("GPL");