// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned reclaim"

struct dmz_reclaim {
	struct dmz_metadata	*metadata;

	/* Periodic reclaim work and the ordered workqueue running it */
	struct delayed_work	work;
	struct workqueue_struct	*wq;

	/* kcopyd client used for zone copies, its throttle and last error */
	struct dm_kcopyd_client	*kc;
	struct dm_kcopyd_throttle kc_throttle;
	int			kc_err;

	/* Reclaim state flags (see enum below) */
	unsigned long		flags;

	/* Last target access time */
	unsigned long		atime;
};

/*
 * Reclaim state flags.
 */
enum {
	DMZ_RECLAIM_KCOPY,
};

/*
 * Period of target BIO inactivity (in jiffies) after which the target
 * is considered idle: 10 seconds.
 */
#define DMZ_IDLE_PERIOD		(10UL * HZ)

/*
 * Percentage of unmapped (free) cache or random zones below which reclaim
 * starts, even if the target is busy.
 */
#define DMZ_RECLAIM_LOW_UNMAP_ZONES	30

/*
 * Percentage of unmapped (free) cache or random zones above which reclaim
 * stops if the target is busy.
 */
#define DMZ_RECLAIM_HIGH_UNMAP_ZONES	50

/*
 * Align a sequential zone write pointer to the specified block position.
 */
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
				sector_t block)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *dev = zone->dev;
	sector_t wp_block = zone->wp_block;
	unsigned int nr_blocks;
	int ret;

	if (wp_block == block)
		return 0;

	if (wp_block > block)
		return -EIO;

	/*
	 * Zero out the space between the write
	 * pointer and the requested position.
	 */
	nr_blocks = block - wp_block;
	ret = blkdev_issue_zeroout(dev->bdev,
				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
	if (ret) {
		dmz_dev_err(dev,
			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
			    zone->id, (unsigned long long)wp_block,
			    (unsigned long long)block, nr_blocks, ret);
		dmz_check_bdev(dev);
		return ret;
	}

	zone->wp_block = block;

	return 0;
}
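
/*
 * Worked example for dmz_reclaim_align_wp(): with zone->wp_block == 100
 * and the next valid block to copy at block 104, blocks 100..103 are
 * zeroed out so that block 104 is written exactly at the zone write
 * pointer and the zone writes stay sequential.
 */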

/*
 * dm_kcopyd_copy end notification.
 */
static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
				  void *context)
{
	struct dmz_reclaim *zrc = context;

	if (read_err || write_err)
		zrc->kc_err = -EIO;
	else
		zrc->kc_err = 0;

	clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
}
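
/*
 * Note: this completion pairs with the wait_on_bit_io() call in
 * dmz_reclaim_copy(): kc_err is set before DMZ_RECLAIM_KCOPY is cleared
 * with release semantics, and smp_mb__after_atomic() orders the clear
 * against wake_up_bit() so the waiter cannot miss the wakeup.
 */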

/*
 * Copy valid blocks of src_zone into dst_zone.
 */
static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
			    struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_io_region src, dst;
	sector_t block = 0, end_block;
	sector_t nr_blocks;
	sector_t src_zone_block;
	sector_t dst_zone_block;
	unsigned long flags = 0;
	int ret;

	if (dmz_is_seq(src_zone))
		end_block = src_zone->wp_block;
	else
		end_block = dmz_zone_nr_blocks(zmd);
	src_zone_block = dmz_start_block(zmd, src_zone);
	dst_zone_block = dmz_start_block(zmd, dst_zone);

	if (dmz_is_seq(dst_zone))
		set_bit(DM_KCOPYD_WRITE_SEQ, &flags);

	while (block < end_block) {
		if (src_zone->dev->flags & DMZ_BDEV_DYING)
			return -EIO;
		if (dst_zone->dev->flags & DMZ_BDEV_DYING)
			return -EIO;

		if (dmz_reclaim_should_terminate(src_zone))
			return -EINTR;

		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, src_zone, &block);
		if (ret <= 0)
			return ret;
		nr_blocks = ret;

		/*
		 * If we are writing in a sequential zone, we must make sure
		 * that writes are sequential. So zero out any hole between
		 * writes.
		 */
		if (dmz_is_seq(dst_zone)) {
			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
			if (ret)
				return ret;
		}

		src.bdev = src_zone->dev->bdev;
		src.sector = dmz_blk2sect(src_zone_block + block);
		src.count = dmz_blk2sect(nr_blocks);

		dst.bdev = dst_zone->dev->bdev;
		dst.sector = dmz_blk2sect(dst_zone_block + block);
		dst.count = src.count;

		/* Copy the valid region */
		set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
		dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
			       dmz_reclaim_kcopy_end, zrc);

		/* Wait for copy to complete */
		wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
			       TASK_UNINTERRUPTIBLE);
		if (zrc->kc_err)
			return zrc->kc_err;

		block += nr_blocks;
		if (dmz_is_seq(dst_zone))
			dst_zone->wp_block = block;
	}

	return 0;
}
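
/*
 * Note: the copy loop above is synchronous: one kcopyd copy is issued
 * per valid region and waited for before the next one. When the
 * destination is a sequential zone, the DM_KCOPYD_WRITE_SEQ flag makes
 * kcopyd issue the writes in sequential order, as such zones require.
 */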

/*
 * Move valid blocks of dzone's buffer zone into dzone (after its write
 * pointer) and free the buffer zone.
 */
static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dm_zone *bzone = dzone->bzone;
	sector_t chunk_block = dzone->wp_block;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	DMDEBUG("(%s): Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
		dmz_metadata_label(zmd),
		dzone->chunk, bzone->id, dmz_weight(bzone),
		dzone->id, dmz_weight(dzone));

	/* Copy the buffer zone blocks into the data zone */
	ret = dmz_reclaim_copy(zrc, bzone, dzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
	if (ret == 0) {
		/* Free the buffer zone */
		dmz_invalidate_blocks(zmd, bzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, bzone);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}
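
/*
 * Note: dmz_do_reclaim() only takes the path above when the buffer zone
 * is empty or all of its valid blocks are at or after dzone->wp_block,
 * so that they can be appended to the data zone without touching blocks
 * below its write pointer.
 */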

/*
 * Merge valid blocks of dzone into its buffer zone and free dzone.
 */
static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *bzone = dzone->bzone;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret = 0;

	DMDEBUG("(%s): Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
		dmz_metadata_label(zmd),
		chunk, dzone->id, dmz_weight(dzone),
		bzone->id, dmz_weight(bzone));

	/* Flush the data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, dzone, bzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
	if (ret == 0) {
		/*
		 * Free the data zone and remap the chunk to
		 * the buffer zone.
		 */
		dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, bzone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Move valid blocks of the random (or cache) data zone dzone into a free
 * sequential zone. Once blocks are moved, remap the zone chunk to the
 * new zone.
 */
static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *szone = NULL;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;
	int alloc_flags = DMZ_ALLOC_SEQ;

	/* Get a free random or sequential zone */
	dmz_lock_map(zmd);
again:
	szone = dmz_alloc_zone(zmd, alloc_flags | DMZ_ALLOC_RECLAIM);
	if (!szone && alloc_flags == DMZ_ALLOC_SEQ && dmz_nr_cache_zones(zmd)) {
		alloc_flags = DMZ_ALLOC_RND;
		goto again;
	}
	dmz_unlock_map(zmd);
	if (!szone)
		return -ENOSPC;

	DMDEBUG("(%s): Chunk %u, move %s zone %u (weight %u) to %s zone %u",
		dmz_metadata_label(zmd), chunk,
		dmz_is_cache(dzone) ? "cache" : "rnd",
		dzone->id, dmz_weight(dzone),
		dmz_is_rnd(szone) ? "rnd" : "seq", szone->id);

	/* Flush the data zone into the new zone */
	ret = dmz_reclaim_copy(zrc, dzone, szone);

	dmz_lock_flush(zmd);

	if (ret == 0) {
		/* Validate copied blocks */
		ret = dmz_copy_valid_blocks(zmd, dzone, szone);
	}
	if (ret) {
		/* Free the target zone */
		dmz_lock_map(zmd);
		dmz_free_zone(zmd, szone);
		dmz_unlock_map(zmd);
	} else {
		/* Free the data zone and remap the chunk */
		dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, szone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}
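
/*
 * Note: the zone allocation above tries a sequential zone first and
 * falls back to a random zone only if none is free and cache zones are
 * configured, so that cache zone data can still be reclaimed into the
 * random zones.
 */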

/*
 * Reclaim an empty zone.
 */
static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dmz_metadata *zmd = zrc->metadata;

	dmz_lock_flush(zmd);
	dmz_lock_map(zmd);
	dmz_unmap_zone(zmd, dzone);
	dmz_unlock_zone_reclaim(dzone);
	dmz_free_zone(zmd, dzone);
	dmz_unlock_map(zmd);
	dmz_unlock_flush(zmd);
}

/*
 * Test if the target device is idle.
 */
static inline int dmz_target_idle(struct dmz_reclaim *zrc)
{
	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
}
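
/*
 * Note: time_is_before_jiffies(t) is true once t is in the past, so the
 * test above means "no BIO was accounted for more than DMZ_IDLE_PERIOD
 * (10 seconds)". dmz_reclaim_bio_acc() refreshes zrc->atime on every
 * target BIO.
 */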

/*
 * Find a candidate zone for reclaim and process it.
 */
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_zone *dzone;
	struct dm_zone *rzone;
	unsigned long start;
	int ret;

	/* Get a data zone */
	dzone = dmz_get_zone_for_reclaim(zmd, dmz_target_idle(zrc));
	if (!dzone) {
		DMDEBUG("(%s): No zone found to reclaim",
			dmz_metadata_label(zmd));
		return -EBUSY;
	}

	/*
	 * rzone is the zone named in the messages below; default to the
	 * data zone so that the early error path does not use it
	 * uninitialized.
	 */
	rzone = dzone;

	start = jiffies;
	if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) {
		if (!dmz_weight(dzone)) {
			/* Empty zone */
			dmz_reclaim_empty(zrc, dzone);
			ret = 0;
		} else {
			/*
			 * Reclaim the random data zone by moving its
			 * valid data blocks to a free sequential zone.
			 */
			ret = dmz_reclaim_rnd_data(zrc, dzone);
		}
	} else {
		struct dm_zone *bzone = dzone->bzone;
		sector_t chunk_block = 0;

		ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
		if (ret < 0)
			goto out;

		if (ret == 0 || chunk_block >= dzone->wp_block) {
			/*
			 * The buffer zone is empty or its valid blocks are
			 * after the data zone write pointer.
			 */
			ret = dmz_reclaim_buf(zrc, dzone);
			rzone = bzone;
		} else {
			/*
			 * Reclaim the data zone by merging it into the
			 * buffer zone so that the buffer zone itself can
			 * be later reclaimed.
			 */
			ret = dmz_reclaim_seq_data(zrc, dzone);
		}
	}
out:
	if (ret) {
		if (ret == -EINTR)
			DMDEBUG("(%s): reclaim zone %u interrupted",
				dmz_metadata_label(zmd), rzone->id);
		else
			DMDEBUG("(%s): Failed to reclaim zone %u, err %d",
				dmz_metadata_label(zmd), rzone->id, ret);
		dmz_unlock_zone_reclaim(dzone);
		return ret;
	}

	ret = dmz_flush_metadata(zrc->metadata);
	if (ret) {
		DMDEBUG("(%s): Metadata flush for zone %u failed, err %d",
			dmz_metadata_label(zmd), rzone->id, ret);
		return ret;
	}

	DMDEBUG("(%s): Reclaimed zone %u in %u ms",
		dmz_metadata_label(zmd),
		rzone->id, jiffies_to_msecs(jiffies - start));
	return 0;
}

/*
 * Percentage of unmapped (free) cache zones, or of unmapped random
 * zones if no cache zones are configured.
 */
static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_cache = dmz_nr_cache_zones(zmd);
	unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
	unsigned int nr_unmap, nr_zones;

	if (nr_cache) {
		nr_zones = nr_cache;
		nr_unmap = dmz_nr_unmap_cache_zones(zmd);
	} else {
		nr_zones = nr_rnd;
		nr_unmap = dmz_nr_unmap_rnd_zones(zmd);
	}
	return nr_unmap * 100 / nr_zones;
}
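
/*
 * Example: with 64 cache zones of which 20 are unmapped, the function
 * above returns 20 * 100 / 64 = 31 (%), between the LOW (30) and
 * HIGH (50) reclaim watermarks.
 */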

/*
 * Test if reclaim is necessary.
 */
static bool dmz_should_reclaim(struct dmz_reclaim *zrc, unsigned int p_unmap)
{
	unsigned int nr_reclaim = dmz_nr_rnd_zones(zrc->metadata);

	if (dmz_nr_cache_zones(zrc->metadata))
		nr_reclaim += dmz_nr_cache_zones(zrc->metadata);

	/* Reclaim when idle */
	if (dmz_target_idle(zrc) && nr_reclaim)
		return true;

	/* If there are still plenty of unmapped zones, do not reclaim */
	if (p_unmap >= DMZ_RECLAIM_HIGH_UNMAP_ZONES)
		return false;

	/*
	 * If the percentage of unmapped cache or random zones is low,
	 * reclaim even if the target is busy.
	 */
	return p_unmap <= DMZ_RECLAIM_LOW_UNMAP_ZONES;
}
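
/*
 * Note: as written, a busy target reclaims only while p_unmap is at or
 * below DMZ_RECLAIM_LOW_UNMAP_ZONES; the HIGH watermark check merely
 * short-circuits the final comparison and adds no hysteresis.
 */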

/*
 * Reclaim work function.
 */
static void dmz_reclaim_work(struct work_struct *work)
{
	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int p_unmap;
	int ret;

	if (dmz_dev_is_dying(zmd))
		return;

	p_unmap = dmz_reclaim_percentage(zrc);
	if (!dmz_should_reclaim(zrc, p_unmap)) {
		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
		return;
	}

	/*
	 * We need to start reclaiming zones: set up the zone copy throttle
	 * to run at full speed if the target is idle or very low on free
	 * zones, and to slow down while free zones remain, so that the
	 * impact on the user workload is kept as small as possible.
	 */
	if (dmz_target_idle(zrc) || p_unmap < DMZ_RECLAIM_LOW_UNMAP_ZONES / 2) {
		/* Idle or very low percentage: go fast */
		zrc->kc_throttle.throttle = 100;
	} else {
		/* Busy but we still have some free zones: throttle */
		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
	}
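
	/*
	 * Throttle arithmetic: an idle target, or p_unmap below 15%, runs
	 * kcopyd at 100%. Otherwise dmz_should_reclaim() guarantees that
	 * 15 <= p_unmap <= 30 here, so 100 - p_unmap / 2 is at least 85
	 * and the min() above always yields a 75% throttle.
	 */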

	DMDEBUG("(%s): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
		dmz_metadata_label(zmd),
		zrc->kc_throttle.throttle,
		(dmz_target_idle(zrc) ? "Idle" : "Busy"),
		p_unmap, dmz_nr_unmap_cache_zones(zmd),
		dmz_nr_cache_zones(zmd),
		dmz_nr_unmap_rnd_zones(zmd),
		dmz_nr_rnd_zones(zmd));

	ret = dmz_do_reclaim(zrc);
	if (ret && ret != -EINTR) {
		if (!dmz_check_dev(zmd))
			return;
	}

	dmz_schedule_reclaim(zrc);
}

/*
 * Initialize reclaim.
 */
int dmz_ctr_reclaim(struct dmz_metadata *zmd,
		    struct dmz_reclaim **reclaim)
{
	struct dmz_reclaim *zrc;
	int ret;

	zrc = kzalloc(sizeof(struct dmz_reclaim), GFP_KERNEL);
	if (!zrc)
		return -ENOMEM;

	zrc->metadata = zmd;
	zrc->atime = jiffies;

	/* Reclaim kcopyd client */
	zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
	if (IS_ERR(zrc->kc)) {
		ret = PTR_ERR(zrc->kc);
		zrc->kc = NULL;
		goto err;
	}

	/* Reclaim work */
	INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
	zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
					  dmz_metadata_label(zmd));
	if (!zrc->wq) {
		ret = -ENOMEM;
		goto err;
	}

	*reclaim = zrc;
	queue_delayed_work(zrc->wq, &zrc->work, 0);

	return 0;
err:
	if (zrc->kc)
		dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);

	return ret;
}

/*
 * Terminate reclaim.
 */
void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
	destroy_workqueue(zrc->wq);
	dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);
}

/*
 * Suspend reclaim.
 */
void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
}

/*
 * Resume reclaim.
 */
void dmz_resume_reclaim(struct dmz_reclaim *zrc)
{
	queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
}

/*
 * BIO accounting.
 */
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
{
	zrc->atime = jiffies;
}

/*
 * Start reclaim if necessary.
 */
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
	unsigned int p_unmap = dmz_reclaim_percentage(zrc);

	if (dmz_should_reclaim(zrc, p_unmap))
		mod_delayed_work(zrc->wq, &zrc->work, 0);
}