/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-btree-internal.h"
#include "dm-transaction-manager.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "btree spine"

/*----------------------------------------------------------------*/

#define BTREE_CSUM_XOR 121107

static int node_check(struct dm_block_validator *v,
		      struct dm_block *b,
		      size_t block_size);

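/*
 * Stamp the node with its own location and a checksum of everything after
 * the csum field, so corruption and misplaced writes can be detected the
 * next time the block is read.
 */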
static void node_prepare_for_write(struct dm_block_validator *v,
				   struct dm_block *b,
				   size_t block_size)
{
	struct btree_node *n = dm_block_data(b);
	struct node_header *h = &n->header;

	h->blocknr = cpu_to_le64(dm_block_location(b));
	h->csum = cpu_to_le32(dm_bm_checksum(&h->flags,
					     block_size - sizeof(__le32),
					     BTREE_CSUM_XOR));
}

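/*
 * Verify a node as it comes off disk: the recorded block number must match
 * where the block actually lives, the checksum must agree, and the header
 * geometry (max_entries, nr_entries, flags) must be self-consistent.
 */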
| 35 | static int node_check(struct dm_block_validator *v, |
| 36 | struct dm_block *b, |
| 37 | size_t block_size) |
| 38 | { |
Mikulas Patocka | 550929f | 2012-12-21 20:23:30 +0000 | [diff] [blame] | 39 | struct btree_node *n = dm_block_data(b); |
Joe Thornber | 3241b1d | 2011-10-31 20:19:11 +0000 | [diff] [blame] | 40 | struct node_header *h = &n->header; |
| 41 | size_t value_size; |
| 42 | __le32 csum_disk; |
| 43 | uint32_t flags; |
| 44 | |
| 45 | if (dm_block_location(b) != le64_to_cpu(h->blocknr)) { |
Mike Snitzer | 89ddeb8 | 2012-12-21 20:23:34 +0000 | [diff] [blame] | 46 | DMERR_LIMIT("node_check failed: blocknr %llu != wanted %llu", |
| 47 | le64_to_cpu(h->blocknr), dm_block_location(b)); |
Joe Thornber | 3241b1d | 2011-10-31 20:19:11 +0000 | [diff] [blame] | 48 | return -ENOTBLK; |
| 49 | } |
| 50 | |
| 51 | csum_disk = cpu_to_le32(dm_bm_checksum(&h->flags, |
| 52 | block_size - sizeof(__le32), |
| 53 | BTREE_CSUM_XOR)); |
| 54 | if (csum_disk != h->csum) { |
Mike Snitzer | 89ddeb8 | 2012-12-21 20:23:34 +0000 | [diff] [blame] | 55 | DMERR_LIMIT("node_check failed: csum %u != wanted %u", |
| 56 | le32_to_cpu(csum_disk), le32_to_cpu(h->csum)); |
Joe Thornber | 3241b1d | 2011-10-31 20:19:11 +0000 | [diff] [blame] | 57 | return -EILSEQ; |
| 58 | } |
| 59 | |
| 60 | value_size = le32_to_cpu(h->value_size); |
| 61 | |
| 62 | if (sizeof(struct node_header) + |
| 63 | (sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) { |
Mike Snitzer | 89ddeb8 | 2012-12-21 20:23:34 +0000 | [diff] [blame] | 64 | DMERR_LIMIT("node_check failed: max_entries too large"); |
Joe Thornber | 3241b1d | 2011-10-31 20:19:11 +0000 | [diff] [blame] | 65 | return -EILSEQ; |
| 66 | } |
| 67 | |
| 68 | if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) { |
Mike Snitzer | 89ddeb8 | 2012-12-21 20:23:34 +0000 | [diff] [blame] | 69 | DMERR_LIMIT("node_check failed: too many entries"); |
Joe Thornber | 3241b1d | 2011-10-31 20:19:11 +0000 | [diff] [blame] | 70 | return -EILSEQ; |
| 71 | } |
| 72 | |
	/*
	 * The node must be either INTERNAL or LEAF.
	 */
	flags = le32_to_cpu(h->flags);
	if (!(flags & INTERNAL_NODE) && !(flags & LEAF_NODE)) {
		DMERR_LIMIT("node_check failed: node is neither INTERNAL nor LEAF");
		return -EILSEQ;
	}

	return 0;
}

struct dm_block_validator btree_node_validator = {
	.name = "btree_node",
	.prepare_for_write = node_prepare_for_write,
	.check = node_check
};

/*----------------------------------------------------------------*/

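/*
 * Take a read lock on block b, running it through the btree node validator
 * first.
 */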
int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
		 struct dm_block **result)
{
	return dm_tm_read_lock(info->tm, b, &btree_node_validator, result);
}

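/*
 * Shadow (copy-on-write) a node so it may be modified within the current
 * transaction.  If the transaction manager actually had to copy the block
 * (inc is set), the children have gained an extra referencing node, so
 * their reference counts are bumped via inc_children().
 */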
static int bn_shadow(struct dm_btree_info *info, dm_block_t orig,
		     struct dm_btree_value_type *vt,
		     struct dm_block **result)
{
	int r, inc;

	r = dm_tm_shadow_block(info->tm, orig, &btree_node_validator,
			       result, &inc);
	if (!r && inc)
		inc_children(info->tm, dm_block_data(*result), vt);

	return r;
}

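/*
 * Allocate and write-lock a fresh block, registered with the btree node
 * validator so it is checksummed on writeout.
 */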
int new_block(struct dm_btree_info *info, struct dm_block **result)
{
	return dm_tm_new_block(info->tm, &btree_node_validator, result);
}

void unlock_block(struct dm_btree_info *info, struct dm_block *b)
{
	dm_tm_unlock(info->tm, b);
}

/*----------------------------------------------------------------*/

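/*
 * A read-only spine tracks the path of a lookup as it descends the tree,
 * holding read locks on at most two nodes at a time: the current node and
 * its parent.
 */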
void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info)
{
	s->info = info;
	s->count = 0;
	s->nodes[0] = NULL;
	s->nodes[1] = NULL;
}

void exit_ro_spine(struct ro_spine *s)
{
	int i;

	for (i = 0; i < s->count; i++) {
		unlock_block(s->info, s->nodes[i]);
	}
}

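/*
 * Step down to new_child.  If two nodes are already held, the grandparent
 * is unlocked first so the spine keeps its two-node window.
 */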
int ro_step(struct ro_spine *s, dm_block_t new_child)
{
	int r;

	if (s->count == 2) {
		unlock_block(s->info, s->nodes[0]);
		s->nodes[0] = s->nodes[1];
		s->count--;
	}

	r = bn_read_lock(s->info, new_child, s->nodes + s->count);
	if (!r)
		s->count++;

	return r;
}

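/*
 * Drop the most recently visited node, unlocking it.
 */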
void ro_pop(struct ro_spine *s)
{
	BUG_ON(!s->count);
	--s->count;
	unlock_block(s->info, s->nodes[s->count]);
}

struct btree_node *ro_node(struct ro_spine *s)
{
	struct dm_block *block;

	BUG_ON(!s->count);
	block = s->nodes[s->count - 1];

	return dm_block_data(block);
}

/*----------------------------------------------------------------*/

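/*
 * A shadow spine is the read/write counterpart of the read-only spine: it
 * shadows nodes as it descends, again keeping at most the current node and
 * its parent locked.
 */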
void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info)
{
	s->info = info;
	s->count = 0;
}

void exit_shadow_spine(struct shadow_spine *s)
{
	int i;

	for (i = 0; i < s->count; i++) {
		unlock_block(s->info, s->nodes[i]);
	}
}

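/*
 * Shadow block b and step down to it.  The first step of a descent shadows
 * the root, so the (possibly relocated) root location is recorded then.
 */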
int shadow_step(struct shadow_spine *s, dm_block_t b,
		struct dm_btree_value_type *vt)
{
	int r;

	if (s->count == 2) {
		unlock_block(s->info, s->nodes[0]);
		s->nodes[0] = s->nodes[1];
		s->count--;
	}

	r = bn_shadow(s->info, b, vt, s->nodes + s->count);
	if (!r) {
		if (!s->count)
			s->root = dm_block_location(s->nodes[0]);

		s->count++;
	}

	return r;
}

struct dm_block *shadow_current(struct shadow_spine *s)
{
	BUG_ON(!s->count);

	return s->nodes[s->count - 1];
}

struct dm_block *shadow_parent(struct shadow_spine *s)
{
	BUG_ON(s->count != 2);

	return s->count == 2 ? s->nodes[0] : NULL;
}

int shadow_has_parent(struct shadow_spine *s)
{
	return s->count >= 2;
}

dm_block_t shadow_root(struct shadow_spine *s)
{
	return s->root;
}

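/*
 * Value type for btrees whose values are __le64 block numbers.  Inc/dec
 * forward to dm_tm_with_runs(), which coalesces runs of consecutive block
 * numbers so the space map can adjust reference counts a range at a time.
 */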
static void le64_inc(void *context, const void *value_le, unsigned count)
{
	dm_tm_with_runs(context, value_le, count, dm_tm_inc_range);
}

static void le64_dec(void *context, const void *value_le, unsigned count)
{
	dm_tm_with_runs(context, value_le, count, dm_tm_dec_range);
}

static int le64_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 v1_le, v2_le;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));
	return v1_le == v2_le;
}

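/*
 * Initialise a dm_btree_value_type for __le64 block-number values, with the
 * transaction manager as context.  A minimal usage sketch (hypothetical
 * caller, not from this file):
 *
 *	struct dm_btree_value_type vt;
 *
 *	init_le64_type(info->tm, &vt);
 *	// vt may now be passed to btree operations that manipulate
 *	// subtrees referenced by __le64 block pointers.
 */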
void init_le64_type(struct dm_transaction_manager *tm,
		    struct dm_btree_value_type *vt)
{
	vt->context = tm;
	vt->size = sizeof(__le64);
	vt->inc = le64_inc;
	vt->dec = le64_dec;
	vt->equal = le64_equal;
}