/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include "ctree.h"
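
/*
 * The free space cache for a block group is kept in two red-black trees:
 * free_space_offset, indexed by the starting offset of each free extent,
 * and free_space_bytes, indexed by extent size.  Every btrfs_free_space
 * entry is linked into both trees, so allocations can search either by
 * location or by size.  Both trees are protected by block_group->lock.
 */
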
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset)
			p = &(*p)->rb_left;
		else if (offset > info->offset)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

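/*
 * Unlike the offset tree, the bytes tree tolerates duplicate keys: two
 * free extents can easily be the same size, so ties simply go to the
 * right child instead of returning -EEXIST.
 */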
static int tree_insert_bytes(struct rb_root *root, u64 bytes,
			     struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, bytes_index);

		if (bytes < info->bytes)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

/*
 * searches the tree for the given offset.  If 'contains' is set, return
 * the free space entry that contains the given offset.  If 'contains' is
 * not set, return the free space entry that starts at or after the given
 * offset and is at least 'bytes' long.
 */
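/*
 * For example, with cached entries [0, 4K) and [8K, 16K), a contains
 * search for offset 9K returns the [8K, 16K) entry, while a non-contains
 * search for offset 5K with bytes == 4K also returns [8K, 16K): it is the
 * lowest entry starting at or after 5K that is big enough.
 */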
static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
						   u64 offset, u64 bytes,
						   int contains)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_free_space *entry, *ret = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_free_space, offset_index);

		if (offset < entry->offset) {
			if (!contains &&
			    (!ret || entry->offset < ret->offset) &&
			    (bytes <= entry->bytes))
				ret = entry;
			n = n->rb_left;
		} else if (offset > entry->offset) {
			if (contains &&
			    (entry->offset + entry->bytes - 1) >= offset) {
				ret = entry;
				break;
			}
			n = n->rb_right;
		} else {
			if (bytes > entry->bytes) {
				n = n->rb_right;
				continue;
			}
			ret = entry;
			break;
		}
	}

	return ret;
}

/*
 * return a chunk of at least 'bytes' in size, as close to 'offset' as we
 * can get.
 */
static struct btrfs_free_space *tree_search_bytes(struct rb_root *root,
						  u64 offset, u64 bytes)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_free_space *entry, *ret = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_free_space, bytes_index);

		if (bytes < entry->bytes) {
			/*
			 * We prefer a hole as close in size to what we are
			 * asking for as possible, so we don't take small
			 * slivers out of huge holes, but we also want to
			 * stay as close to the offset as possible to keep
			 * fragmentation down.
			 */
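			/*
			 * For example, asking for 4K when the tree holds
			 * an 8K hole and a 64K hole picks the 8K hole:
			 * the smallest hole that still fits wins.
			 */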
			if (offset <= entry->offset) {
				if (!ret)
					ret = entry;
				else if (entry->bytes < ret->bytes)
					ret = entry;
				else if (entry->offset < ret->offset)
					ret = entry;
			}
			n = n->rb_left;
		} else if (bytes > entry->bytes) {
			n = n->rb_right;
		} else {
			/*
			 * Ok, we may have multiple chunks of the wanted
			 * size, so we don't want to take the first one we
			 * find; we want to take the one closest to our
			 * given offset, so keep searching in case there's
			 * a better match.
			 */
			n = n->rb_right;
			if (offset > entry->offset)
				continue;
			else if (!ret || entry->offset < ret->offset)
				ret = entry;
		}
	}

	return ret;
}

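/*
 * Helpers to add and drop an entry from both trees at once; only the
 * offset tree can reject an insert (-EEXIST on a duplicate offset).
 */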
static void unlink_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &block_group->free_space_offset);
	rb_erase(&info->bytes_index, &block_group->free_space_bytes);
}

static int link_free_space(struct btrfs_block_group_cache *block_group,
			   struct btrfs_free_space *info)
{
	int ret;

	ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
				 &info->offset_index);
	if (ret)
		return ret;

	return tree_insert_bytes(&block_group->free_space_bytes, info->bytes,
				 &info->bytes_index);
}

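/*
 * Add [offset, offset + bytes) back to the free space cache, merging with
 * any entry that ends exactly at 'offset' or starts exactly at
 * 'offset + bytes' so the trees never hold two adjacent extents.
 */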
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
			 u64 offset, u64 bytes)
{
	struct btrfs_free_space *right_info;
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *info = NULL;
	struct btrfs_free_space *alloc_info;
	int ret = 0;

	alloc_info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
	if (!alloc_info)
		return -ENOMEM;

	/*
	 * first we want to see if there is free space adjacent to the range
	 * we are adding; if there is, remove that struct and add a new one
	 * to cover the entire range.
	 */
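	/*
	 * For example, freeing [4K, 8K) while [0, 4K) and [8K, 12K) are
	 * already cached collapses all three into a single [0, 12K) entry.
	 */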
	spin_lock(&block_group->lock);

	right_info = tree_search_offset(&block_group->free_space_offset,
					offset+bytes, 0, 1);
	left_info = tree_search_offset(&block_group->free_space_offset,
				       offset-1, 0, 1);

	if (right_info && right_info->offset == offset+bytes) {
		unlink_free_space(block_group, right_info);
		info = right_info;
		info->offset = offset;
		info->bytes += bytes;
	} else if (right_info && right_info->offset != offset+bytes) {
		printk(KERN_ERR "adding space in the middle of an existing "
		       "free space area. existing: offset=%Lu, bytes=%Lu. "
		       "new: offset=%Lu, bytes=%Lu\n", right_info->offset,
		       right_info->bytes, offset, bytes);
		BUG();
	}

	if (left_info) {
		unlink_free_space(block_group, left_info);

		if (unlikely((left_info->offset + left_info->bytes) !=
			     offset)) {
			printk(KERN_ERR "free space to the left of new free "
			       "space isn't quite right. existing: offset=%Lu,"
			       " bytes=%Lu. new: offset=%Lu, bytes=%Lu\n",
			       left_info->offset, left_info->bytes, offset,
			       bytes);
			BUG();
		}

		if (info) {
			info->offset = left_info->offset;
			info->bytes += left_info->bytes;
			kfree(left_info);
		} else {
			info = left_info;
			info->bytes += bytes;
		}
	}

	if (info) {
		ret = link_free_space(block_group, info);
		if (!ret)
			info = NULL;
		goto out;
	}

	info = alloc_info;
	alloc_info = NULL;
	info->offset = offset;
	info->bytes = bytes;

	ret = link_free_space(block_group, info);
	if (ret)
		kfree(info);
out:
	spin_unlock(&block_group->lock);
	if (ret) {
		printk(KERN_ERR "btrfs: unable to add free space: %d\n", ret);
		BUG_ON(ret == -EEXIST);
	}

	kfree(alloc_info);

	return ret;
}

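/*
 * Carve [offset, offset + bytes) out of the free space cache.  The range
 * must lie entirely inside a single cached entry; that entry is trimmed,
 * consumed whole, or split around the removed range.
 */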
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&block_group->lock);
	info = tree_search_offset(&block_group->free_space_offset, offset, 0,
				  1);

	if (info && info->offset == offset) {
		if (info->bytes < bytes) {
			printk(KERN_ERR "Found free space at %Lu, size %Lu, "
			       "trying to use %Lu\n",
			       info->offset, info->bytes, bytes);
			WARN_ON(1);
			ret = -EINVAL;
			goto out;
		}

		unlink_free_space(block_group, info);

		if (info->bytes == bytes) {
			kfree(info);
			goto out;
		}

		info->offset += bytes;
		info->bytes -= bytes;

		ret = link_free_space(block_group, info);
		BUG_ON(ret);
	} else if (info && info->offset < offset &&
		   info->offset + info->bytes >= offset + bytes) {
		u64 old_start = info->offset;
		/*
		 * we're freeing space in the middle of the info,
		 * this can happen during tree log replay
		 *
		 * first unlink the old info and then
		 * insert it again after the hole we're creating
		 */
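		/*
		 * For example, removing [4K, 8K) from a cached [0, 16K)
		 * entry leaves two entries behind: [8K, 16K) (the trimmed
		 * original) and [0, 4K) (re-added in step two below).
		 */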
		unlink_free_space(block_group, info);
		if (offset + bytes < info->offset + info->bytes) {
			u64 old_end = info->offset + info->bytes;

			info->offset = offset + bytes;
			info->bytes = old_end - info->offset;
			ret = link_free_space(block_group, info);
			BUG_ON(ret);
		} else {
			/* the hole we're creating ends at the end
			 * of the info struct, just free the info
			 */
			kfree(info);
		}

		/* step two, insert a new info struct to cover anything
		 * before the hole
		 */
		spin_unlock(&block_group->lock);
		ret = btrfs_add_free_space(block_group, old_start,
					   offset - old_start);
		BUG_ON(ret);
		goto out_nolock;
	} else {
		WARN_ON(1);
	}
out:
	spin_unlock(&block_group->lock);
out_nolock:
	return ret;
}

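/*
 * Debugging aid: count (and optionally print) the cached free extents
 * that are at least 'bytes' in size.
 */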
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&block_group->free_space_offset); n;
	     n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes)
			count++;
		/*
		 * printk(KERN_INFO "offset=%Lu, bytes=%Lu\n",
		 *	  info->offset, info->bytes);
		 */
	}
	printk(KERN_INFO "%d blocks of free space at or bigger than %Lu "
	       "bytes\n", count, bytes);
}

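/*
 * Return the total number of free bytes currently cached for this block
 * group by walking every entry in the offset tree.
 */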
u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	u64 ret = 0;

	for (n = rb_first(&block_group->free_space_offset); n;
	     n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		ret += info->bytes;
	}

	return ret;
}

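/*
 * Free every cached entry for this block group, dropping the lock
 * periodically so we don't hog the CPU while tearing down a large cache.
 */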
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	spin_lock(&block_group->lock);
	while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, bytes_index);
		unlink_free_space(block_group, info);
		kfree(info);
		if (need_resched()) {
			spin_unlock(&block_group->lock);
			cond_resched();
			spin_lock(&block_group->lock);
		}
	}
	spin_unlock(&block_group->lock);
}

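/*
 * Find a free extent of at least 'bytes' starting at or after 'offset',
 * searching by location.
 */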
struct btrfs_free_space *
btrfs_find_free_space_offset(struct btrfs_block_group_cache *block_group,
			     u64 offset, u64 bytes)
{
	struct btrfs_free_space *ret;

	spin_lock(&block_group->lock);
	ret = tree_search_offset(&block_group->free_space_offset, offset,
				 bytes, 0);
	spin_unlock(&block_group->lock);

	return ret;
}

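/*
 * Find a free extent of at least 'bytes', searching by size and using
 * 'offset' only to choose between otherwise comparable candidates.
 */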
struct btrfs_free_space *
btrfs_find_free_space_bytes(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space *ret;

	spin_lock(&block_group->lock);

	ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
	spin_unlock(&block_group->lock);

	return ret;
}

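/*
 * Find a free extent of at least 'bytes': try the offset tree first for
 * something at or after 'offset', then fall back to a size-based search.
 */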
struct btrfs_free_space *
btrfs_find_free_space(struct btrfs_block_group_cache *block_group,
		      u64 offset, u64 bytes)
{
	struct btrfs_free_space *ret;

	spin_lock(&block_group->lock);
	ret = tree_search_offset(&block_group->free_space_offset, offset,
				 bytes, 0);
	if (!ret)
		ret = tree_search_bytes(&block_group->free_space_bytes,
					offset, bytes);

	spin_unlock(&block_group->lock);

	return ret;
}