/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <trace/events/bcache.h>

static struct workqueue_struct *dirty_wq;

static void read_dirty(struct closure *);

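/*
 * State for one chunk of dirty data in flight.  The bio must be the last
 * member: a dirty_io is allocated with enough trailing space for the bio's
 * inline bio_vecs (see the kzalloc() in read_dirty()), and bi_io_vec is
 * pointed at bi_inline_vecs in dirty_init().
 */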
struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	struct bio		bio;
};

/* Rate limiting */

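/*
 * The writeback rate is set by a simple PD (proportional-derivative)
 * controller.  The target is writeback_percent of the cache, apportioned to
 * this backing device by its share of c->cached_dev_sectors.  The
 * proportional term scales the rate by how far the dirty count is from that
 * target; the derivative term (the rate of change of the dirty count,
 * smoothed with an EWMA) damps oscillation as we approach it.
 */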
static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */

	int change = 0;
	int64_t error;
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;

	dc->disk.sectors_dirty_last = dirty;

	derivative *= dc->writeback_rate_d_term;
	derivative = clamp(derivative, -dirty, dirty);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      dc->writeback_rate_d_smooth, 0);

	/* Avoid divide by zero */
	if (!target)
		goto out;

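	/*
	 * error is the relative distance from the target in 8-bit fixed
	 * point (hence the << 8 here and the >> 8 below); the proportional
	 * gain is expressed as an inverse, so the change is divided by
	 * writeback_rate_p_term_inverse rather than multiplied by a
	 * fraction.
	 */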
	error = div64_s64((dirty + derivative - target) << 8, target);

	change = div_s64((dc->writeback_rate.rate * error) >> 8,
			 dc->writeback_rate_p_term_inverse);

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);
out:
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}

static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);
}

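/*
 * Convert sectors just written back into a delay, in jiffies, to apply
 * before the next writeback IO.  bch_next_delay() charges the sectors
 * against writeback_rate and returns how far behind schedule we need to
 * wait (the 10000000ULL factor converts sectors into the internal units the
 * rate is kept in); the delay is capped at HZ so we never sleep more than a
 * second.  No throttling while detaching, or when writeback_percent is
 * unset.
 */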
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	uint64_t ret;

	if (atomic_read(&dc->disk.detaching) ||
	    !dc->writeback_percent)
		return 0;

	ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);

	return min_t(uint64_t, ret, HZ);
}

/* Background writeback */

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	return KEY_DIRTY(k);
}

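/*
 * Stricter predicate for backing devices where partial stripe writes are
 * expensive (e.g. a RAID 5/6 array, where a sub-stripe write implies a
 * read-modify-write cycle): only match a dirty key if one of the stripes it
 * touches is completely dirty, so writeback tends to issue full stripe
 * writes.
 */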
static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
{
	uint64_t stripe = KEY_START(k);
	unsigned nr_sectors = KEY_SIZE(k);
	struct cached_dev *dc = container_of(buf, struct cached_dev,
					     writeback_keys);

	if (!KEY_DIRTY(k))
		return false;

	do_div(stripe, dc->disk.stripe_size);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) ==
		    dc->disk.stripe_size)
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

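/*
 * Initialize the embedded bio to cover KEY_SIZE(&w->key) sectors, backed by
 * the inline bio_vecs allocated along with the dirty_io.  Writeback IO goes
 * out at idle priority when writeback_percent is unset, i.e. when the PD
 * controller isn't already throttling it.
 */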
static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio);
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_size = KEY_SIZE(&w->key) << 9;
	bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
	bio->bi_private = w;
	bio->bi_io_vec = bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

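/*
 * Scan the btree for dirty keys belonging to this device and refill the
 * writeback keybuf with them.  If partial stripes are expensive, first look
 * for completely dirty stripes and refill with full-stripe keys only,
 * falling back to any dirty key otherwise.  Once the whole btree has been
 * scanned and no dirty keys remain, clear has_dirty and mark the backing
 * device clean in its superblock.
 */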
static void refill_dirty(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev,
					     writeback.cl);
	struct keybuf *buf = &dc->writeback_keys;
	bool searched_from_start = false;
	struct bkey end = MAX_KEY;
	SET_KEY_INODE(&end, dc->disk.id);

	if (!atomic_read(&dc->disk.detaching) &&
	    !dc->writeback_running)
		closure_return(cl);

	down_write(&dc->writeback_lock);

	if (!atomic_read(&dc->has_dirty)) {
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
		bch_write_bdev_super(dc, NULL);

		up_write(&dc->writeback_lock);
		closure_return(cl);
	}

	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
		buf->last_scanned = KEY(dc->disk.id, 0, 0);
		searched_from_start = true;
	}

	if (dc->partial_stripes_expensive) {
		uint64_t i;

		for (i = 0; i < dc->disk.nr_stripes; i++)
			if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
			    dc->disk.stripe_size)
				goto full_stripes;

		goto normal_refill;
full_stripes:
		searched_from_start = false;	/* not searching entire btree */
		bch_refill_keybuf(dc->disk.c, buf, &end,
				  dirty_full_stripe_pred);
	} else {
normal_refill:
		bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
	}

	if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
		/* Searched the entire btree - delay awhile */

		if (RB_EMPTY_ROOT(&buf->keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
		}

		if (!atomic_read(&dc->disk.detaching))
			closure_delay(&dc->writeback, dc->writeback_delay * HZ);
	}

	up_write(&dc->writeback_lock);

	bch_ratelimit_reset(&dc->writeback_rate);

	/* Punt to workqueue only so we don't recurse and blow the stack */
	continue_at(cl, read_dirty, dirty_wq);
}

void bch_writeback_queue(struct cached_dev *dc)
{
	if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) {
		if (!atomic_read(&dc->disk.detaching))
			closure_delay(&dc->writeback, dc->writeback_delay * HZ);

		continue_at(&dc->writeback.cl, refill_dirty, dirty_wq);
	}
}

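/*
 * Called when dirty data is first written into the cache.  The 0 -> 1
 * transition of has_dirty takes a ref on the cached_dev, marks the backing
 * device dirty in its superblock (so it won't be run without this cache),
 * and kicks off the writeback thread and the rate-update worker.
 */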
void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);

		if (dc->writeback_percent)
			schedule_delayed_work(&dc->writeback_rate_update,
					      dc->writeback_rate_update_seconds * HZ);
	}
}

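/*
 * Update the per-stripe dirty sector counters for the span [offset,
 * offset + nr_sectors) on the given device; nr_sectors is negative when
 * sectors go clean.  The span is walked a stripe at a time.  Note the
 * "offset & (d->stripe_size - 1)" below only computes the offset within the
 * stripe correctly when stripe_size is a power of two.
 */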
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset;
	uint64_t stripe = offset;

	if (!d)
		return;

	do_div(stripe, d->stripe_size);

	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		atomic_add(s, d->stripe_sectors_dirty + stripe);
		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

/* Background writeback - IO loop */

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

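/*
 * The write to the backing device has finished: free the data pages, and,
 * unless the write errored (dirty_endio() clears KEY_DIRTY on error),
 * reinsert the key into the btree with its dirty bit cleared.  The insert
 * uses BTREE_REPLACE so that if the data was redirtied while the write was
 * in flight, the now-stale clean key loses the race; the buckets it points
 * into are pinned for the duration of the insert.
 */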
static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, &io->bio, i)
		__free_page(bv->bv_page);

	/*
	 * This is kind of a dumb way of signalling errors: on error,
	 * dirty_endio() cleared KEY_DIRTY, so we skip the btree update here.
	 */
	if (KEY_DIRTY(&w->key)) {
		unsigned i;
		struct btree_op op;
		bch_btree_op_init_stack(&op);

		op.type = BTREE_REPLACE;
		bkey_copy(&op.replace, &w->key);

		SET_KEY_DIRTY(&w->key, false);
		bch_keylist_add(&op.keys, &w->key);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		bch_btree_insert(&op, dc->disk.c);
		closure_sync(&op.cl);

		if (op.insert_collision)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(op.insert_collision
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (error)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}

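/*
 * The dirty data has been read from the cache into io->bio's pages;
 * reinitialize the bio to point at the same data and submit it as a write
 * to the backing device, at the offset described by the key.  Finishes on a
 * workqueue, since the btree update in write_dirty_finish() blocks.
 */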
static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	dirty_init(w);
	io->bio.bi_rw = WRITE;
	io->bio.bi_sector = KEY_START(&w->key);
	io->bio.bi_bdev = io->dc->bdev;
	io->bio.bi_end_io = dirty_endio;

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty_finish, system_wq);
}

static void read_dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    error, "reading dirty data from cache");

	dirty_endio(bio, error);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty, system_wq);
}

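/*
 * Main writeback IO loop: pop dirty keys off the keybuf, allocate a
 * dirty_io and pages for each, and read the dirty data from the cache;
 * read_dirty_submit() then chains into write_dirty().  The loop is paced by
 * writeback_delay() and bounded by the dc->in_flight semaphore (64 IOs).
 * When the keybuf runs dry, punt back to refill_dirty().
 */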
static void read_dirty(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev,
					     writeback.cl);
	unsigned delay = writeback_delay(dc, 0);
	struct keybuf_key *w;
	struct dirty_io *io;

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	while (1) {
		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

		if (delay > 0 &&
		    (KEY_START(&w->key) != dc->last_read ||
		     jiffies_to_msecs(delay) > 50))
			delay = schedule_timeout_uninterruptible(delay);

		dc->last_read = KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private = io;
		io->dc = dc;

		dirty_init(w);
		io->bio.bi_sector = PTR_OFFSET(&w->key, 0);
		io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
					    &w->key, 0)->bdev;
		io->bio.bi_rw = READ;
		io->bio.bi_end_io = read_dirty_endio;

		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		down(&dc->in_flight);
		closure_call(&io->cl, read_dirty_submit, NULL, cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	continue_at(cl, refill_dirty, dirty_wq);
}

/* Init */

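/*
 * Walk the btree at registration time and rebuild this device's per-stripe
 * dirty sector counts from the dirty keys found on disk, so the PD
 * controller and the full-stripe logic start with accurate numbers.
 */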
static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
					struct cached_dev *dc)
{
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
		if (!b->level) {
			if (KEY_INODE(k) > dc->disk.id)
				break;

			if (KEY_DIRTY(k))
				bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
							     KEY_START(k),
							     KEY_SIZE(k));
		} else {
			btree(sectors_dirty_init, k, b, op, dc);
			if (KEY_INODE(k) > dc->disk.id)
				break;

			cond_resched();
		}

	return 0;
}

void bch_sectors_dirty_init(struct cached_dev *dc)
{
	struct btree_op op;

	bch_btree_op_init_stack(&op);
	btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
}

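/*
 * Writeback defaults: up to 64 IOs in flight, a dirty target of 10% of the
 * cache, a 30 second rescan delay when idle, and the PD controller terms,
 * most of them tunable through sysfs.
 */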
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	closure_init_unlocked(&dc->writeback);
	init_rwsem(&dc->writeback_lock);

	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata = true;
	dc->writeback_running = true;
	dc->writeback_percent = 10;
	dc->writeback_delay = 30;
	dc->writeback_rate.rate = 1024;

	dc->writeback_rate_update_seconds = 30;
	dc->writeback_rate_d_term = 16;
	dc->writeback_rate_p_term_inverse = 64;
	dc->writeback_rate_d_smooth = 8;

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}

void bch_writeback_exit(void)
{
	if (dirty_wq)
		destroy_workqueue(dirty_wq);
}

int __init bch_writeback_init(void)
{
	dirty_wq = create_workqueue("bcache_writeback");
	if (!dirty_wq)
		return -ENOMEM;

	return 0;
}