/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#ifndef DM_CACHE_POLICY_H
#define DM_CACHE_POLICY_H

#include "dm-cache-block-types.h"

#include <linux/device-mapper.h>

/*----------------------------------------------------------------*/
| 15 | |
/*
 * The cache policy makes the important decisions about which blocks get to
 * live on the faster cache device.
 */
enum policy_operation {
	POLICY_PROMOTE,		/* copy an origin block into the cache */
	POLICY_DEMOTE,		/* remove a block from the cache */
	POLICY_WRITEBACK	/* write a dirty cache block back to the origin */
};
| 25 | |
/*
 * This is the instruction passed back to the core target.
 */
struct policy_work {
	enum policy_operation op;	/* which migration to perform */
	dm_oblock_t oblock;		/* origin device block involved */
	dm_cblock_t cblock;		/* cache device block involved */
};
| 34 | |
/*
 * The cache policy object.  It is envisaged that this structure will be
 * embedded in a bigger, policy specific structure (ie. use container_of()).
 */
struct dm_cache_policy {
	/*
	 * Destroys this object.
	 */
	void (*destroy)(struct dm_cache_policy *p);

	/*
	 * Find the location of a block.
	 *
	 * Must not block.
	 *
	 * Returns 0 if in cache (cblock will be set), -ENOENT if not, < 0 for
	 * other errors (-EWOULDBLOCK would be typical).  data_dir should be
	 * READ or WRITE.  fast_copy should be set if migrating this block would
	 * be 'cheap' somehow (eg, discarded data).  background_queued will be set
	 * if a migration has just been queued.
	 */
	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
		      int data_dir, bool fast_copy, bool *background_queued);

	/*
	 * Sometimes the core target can optimise a migration, eg, the
	 * block may be discarded, or the bio may cover an entire block.
	 * In order to optimise it needs the migration immediately though
	 * so it knows to do something different with the bio.
	 *
	 * This method is optional (policy-internal will fallback to using
	 * lookup).
	 */
	int (*lookup_with_work)(struct dm_cache_policy *p,
				dm_oblock_t oblock, dm_cblock_t *cblock,
				int data_dir, bool fast_copy,
				struct policy_work **work);

	/*
	 * Retrieves background work.  Returns -ENODATA when there's no
	 * background work.
	 */
	int (*get_background_work)(struct dm_cache_policy *p, bool idle,
				   struct policy_work **result);

	/*
	 * You must pass in the same work pointer that you were given, not
	 * a copy.
	 */
	void (*complete_background_work)(struct dm_cache_policy *p,
					 struct policy_work *work,
					 bool success);

	/*
	 * Update the dirty state the policy tracks for a cache block.
	 */
	void (*set_dirty)(struct dm_cache_policy *p, dm_cblock_t cblock);
	void (*clear_dirty)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * Called when a cache target is first created.  Used to load a
	 * mapping from the metadata device into the policy.
	 */
	int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
			    dm_cblock_t cblock, bool dirty,
			    uint32_t hint, bool hint_valid);

	/*
	 * Drops the mapping, irrespective of whether it's clean or dirty.
	 * Returns -ENODATA if cblock is not mapped.
	 */
	int (*invalidate_mapping)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * Gets the hint for a given cblock.  Called in a single threaded
	 * context.  So no locking required.
	 */
	uint32_t (*get_hint)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * How full is the cache?
	 */
	dm_cblock_t (*residency)(struct dm_cache_policy *p);

	/*
	 * Because of where we sit in the block layer, we can be asked to
	 * map a lot of little bios that are all in the same block (no
	 * queue merging has occurred).  To stop the policy being fooled by
	 * these, the core target sends regular tick() calls to the policy.
	 * The policy should only count an entry as hit once per tick.
	 *
	 * This method is optional.
	 */
	void (*tick)(struct dm_cache_policy *p, bool can_block);

	/*
	 * Configuration.
	 */
	int (*emit_config_values)(struct dm_cache_policy *p, char *result,
				  unsigned maxlen, ssize_t *sz_ptr);
	int (*set_config_value)(struct dm_cache_policy *p,
				const char *key, const char *value);

	/*
	 * Allow or disallow the policy from queueing migrations.
	 */
	void (*allow_migrations)(struct dm_cache_policy *p, bool allow);

	/*
	 * Book keeping ptr for the policy register, not for general use.
	 */
	void *private;
};
| 142 | |
/*----------------------------------------------------------------*/

/*
 * We maintain a little register of the different policy types.
 */
#define CACHE_POLICY_NAME_SIZE 16
#define CACHE_POLICY_VERSION_SIZE 3
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 150 | |
struct dm_cache_policy_type {
	/* For use by the register code only. */
	struct list_head list;

	/*
	 * Policy writers should fill in these fields.  The name field is
	 * what gets passed on the target line to select your policy.
	 */
	char name[CACHE_POLICY_NAME_SIZE];
	unsigned version[CACHE_POLICY_VERSION_SIZE];

	/*
	 * For use by an alias dm_cache_policy_type to point to the
	 * real dm_cache_policy_type.
	 */
	struct dm_cache_policy_type *real;

	/*
	 * Policies may store a hint for each cache block.
	 * Currently the size of this hint must be 0 or 4 bytes but we
	 * expect to relax this in future.
	 */
	size_t hint_size;

	/* Module implementing this policy. */
	struct module *owner;

	/* Constructor for a policy instance. */
	struct dm_cache_policy *(*create)(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t block_size);
};
| 180 | |
/*
 * Add/remove a policy type to/from the register, so the core target
 * can select it by name.
 */
int dm_cache_policy_register(struct dm_cache_policy_type *type);
void dm_cache_policy_unregister(struct dm_cache_policy_type *type);
| 183 | |
/*----------------------------------------------------------------*/

#endif /* DM_CACHE_POLICY_H */