/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include <linux/dm-bufio.h>

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 * Start background writeback when the number of dirty buffers exceeds
 * DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_RATIO	3
#define DM_BUFIO_LOW_WATERMARK_RATIO	16

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The nr of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN		4096

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to buffer_tree with their node field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too. They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	s8 sectors_per_block_bits;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct kmem_cache *slab_buffer;
	struct kmem_cache *slab_cache;
	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	sector_t start;

	int async_write_error;

	struct list_head client_list;

	struct shrinker shrinker;
	struct work_struct shrink_work;
	atomic_long_t need_shrink;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	struct list_head global_list;
	sector_t block;
	void *data;
	unsigned char data_mode;		/* DATA_MODE_* */
	unsigned char list_mode;		/* LIST_* */
	blk_status_t read_error;
	blk_status_t write_error;
	unsigned accessed;
	unsigned hold_count;
	unsigned long state;
	unsigned long last_accessed;
	unsigned dirty_start;
	unsigned dirty_end;
	unsigned write_start;
	unsigned write_end;
	struct dm_bufio_client *c;
	struct list_head write_list;
	void (*end_io)(struct dm_buffer *, blk_status_t);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	unsigned int stack_len;
	unsigned long stack_entries[MAX_STACK];
#endif
};

/*----------------------------------------------------------------*/

#define dm_bufio_in_request()	(!!current->bio_list)

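/*
 * The lockdep subclass for c->lock is taken from dm_bufio_in_request(),
 * so a client locked from inside bio processing is tracked separately
 * from one locked in ordinary process context.
 */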
static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time. If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

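/*
 * All buffers, from all clients, are linked on global_queue under
 * global_spinlock (see adjust_total_allocated()). When the total size of
 * cached data exceeds dm_bufio_cache_size, dm_bufio_replacement_work is
 * queued to evict buffers from this global LRU.
 */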
static DEFINE_SPINLOCK(global_spinlock);

static LIST_HEAD(global_queue);

static unsigned long global_num;

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif

/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = block < b->block ? n->rb_left : n->rb_right;
	}

	return NULL;
}

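/*
 * Like __find(), but on a miss return the buffer with the lowest block
 * number greater than the requested one, or NULL if no such buffer
 * exists.
 */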
static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;
	struct dm_buffer *best = NULL;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		if (block <= b->block) {
			n = n->rb_left;
			best = b;
		} else {
			n = n->rb_right;
		}
	}

	return best;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = b->block < found->block ?
			&found->node.rb_left : &found->node.rb_right;
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}

/*----------------------------------------------------------------*/

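/*
 * Update the global allocation statistics for the buffer's data mode and
 * link/unlink it on the global LRU, all under global_spinlock. If a newly
 * linked buffer pushes the total above dm_bufio_cache_size, kick the
 * replacement worker.
 */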
static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
{
	unsigned char data_mode;
	long diff;

	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	data_mode = b->data_mode;
	diff = (long)b->c->block_size;
	if (unlink)
		diff = -diff;

	spin_lock(&global_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	b->accessed = 1;

	if (!unlink) {
		list_add(&b->global_list, &global_queue);
		global_num++;
		if (dm_bufio_current_allocated > dm_bufio_cache_size)
			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
	} else {
		list_del(&b->global_list);
		global_num--;
	}

	spin_unlock(&global_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
{
	if (unlikely(c->slab_cache != NULL)) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(c->slab_cache, gfp_mask);
	}

	if (c->block_size <= KMALLOC_MAX_SIZE &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
				c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */
	if (gfp_mask & __GFP_NORETRY) {
		unsigned noio_flag = memalloc_noio_save();
		void *ptr = __vmalloc(c->block_size, gfp_mask);

		memalloc_noio_restore(noio_flag);
		return ptr;
	}

	return __vmalloc(c->block_size, gfp_mask);
}

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(c->slab_cache, data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data,
			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kmem_cache_free(c->slab_buffer, b);
		return NULL;
	}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	b->stack_len = 0;
#endif
	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	free_buffer_data(c, b->data, b->data_mode);
	kmem_cache_free(c->slab_buffer, b);
}

/*
 * Link buffer to the buffer tree and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;

	adjust_total_allocated(b, false);
}

/*
 * Unlink buffer from the buffer tree and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);

	adjust_total_allocated(b, true);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	b->accessed = 1;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 * If the buffer is small enough and it is not vmalloced, try using the
 * bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->end_io, pretending that the
 * request was handled directly with the bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
		     unsigned n_sectors, unsigned offset)
{
	int r;
	struct dm_io_request io_req = {
		.bi_op = rw,
		.bi_op_flags = 0,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = sector,
		.count = n_sectors,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = (char *)b->data + offset;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = (char *)b->data + offset;
	}

	r = dm_io(&io_req, 1, &region, NULL);
	if (unlikely(r))
		b->end_io(b, errno_to_blk_status(r));
}

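/*
 * Completion for the bio path: save the status, drop the bio and forward
 * the status to the common b->end_io callback.
 */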
static void bio_complete(struct bio *bio)
{
	struct dm_buffer *b = bio->bi_private;
	blk_status_t status = bio->bi_status;
	bio_put(bio);
	b->end_io(b, status);
}

static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
		    unsigned n_sectors, unsigned offset)
{
	struct bio *bio;
	char *ptr;
	unsigned vec_size, len;

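	/*
	 * One vec per page of data; for a block smaller than a page,
	 * reserve two vecs, apparently so that data straddling a page
	 * boundary still fits.
	 */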
	vec_size = b->c->block_size >> PAGE_SHIFT;
	if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
		vec_size += 2;

	bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
	if (!bio) {
dmio:
		use_dmio(b, rw, sector, n_sectors, offset);
		return;
	}

	bio->bi_iter.bi_sector = sector;
	bio_set_dev(bio, b->c->bdev);
	bio_set_op_attrs(bio, rw, 0);
	bio->bi_end_io = bio_complete;
	bio->bi_private = b;

	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;

	do {
		unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
		if (!bio_add_page(bio, virt_to_page(ptr), this_step,
				  offset_in_page(ptr))) {
			bio_put(bio);
			goto dmio;
		}

		len -= this_step;
		ptr += this_step;
	} while (len > 0);

	submit_bio(bio);
}

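/*
 * Convert a block number to a sector on the underlying device: a shift
 * when the block size is a power of two (sectors_per_block_bits >= 0),
 * a multiply otherwise, plus the client's start offset.
 */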
static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
{
	sector_t sector;

	if (likely(c->sectors_per_block_bits >= 0))
		sector = block << c->sectors_per_block_bits;
	else
		sector = block * (c->block_size >> SECTOR_SHIFT);
	sector += c->start;

	return sector;
}

static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
{
	unsigned n_sectors;
	sector_t sector;
	unsigned offset, end;

	b->end_io = end_io;

	sector = block_to_sector(b->c, b->block);

	if (rw != REQ_OP_WRITE) {
		n_sectors = b->c->block_size >> SECTOR_SHIFT;
		offset = 0;
	} else {
		if (b->c->write_callback)
			b->c->write_callback(b);
		offset = b->write_start;
		end = b->write_end;
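		/*
		 * Round offset down and end up to DM_BUFIO_WRITE_ALIGN
		 * (the "&= -DM_BUFIO_WRITE_ALIGN" clears the low bits of a
		 * power-of-two alignment), then clamp end to the block size.
		 */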
		offset &= -DM_BUFIO_WRITE_ALIGN;
		end += DM_BUFIO_WRITE_ALIGN - 1;
		end &= -DM_BUFIO_WRITE_ALIGN;
		if (unlikely(end > b->c->block_size))
			end = b->c->block_size;

		sector += offset >> SECTOR_SHIFT;
		n_sectors = (end - offset) >> SECTOR_SHIFT;
	}

	if (b->data_mode != DATA_MODE_VMALLOC)
		use_bio(b, rw, sector, n_sectors, offset);
	else
		use_dmio(b, rw, sector, n_sectors, offset);
}

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct dm_buffer *b, blk_status_t status)
{
	b->write_error = status;
	if (unlikely(status)) {
		struct dm_bufio_client *c = b->c;

		(void)cmpxchg(&c->async_write_error, 0,
			      blk_status_to_errno(status));
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

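	/*
	 * wake_up_bit() does not imply a memory barrier, so order the
	 * clearing of B_WRITING against the waiter's test of the bit.
	 */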
	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we
 *   can't have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	b->write_start = b->dirty_start;
	b->write_end = b->dirty_end;

	if (!write_list)
		submit_io(b, REQ_OP_WRITE, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

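/*
 * Submit all the writes that were deferred onto write_list, under a block
 * plug so the block layer can merge them.
 */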
static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;
	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, REQ_OP_WRITE, write_endio);
		cond_resched();
	}
	blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes. Possibly write the
 * buffer if it is dirty. When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	return NULL;
}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;
	bool tried_noio_alloc = false;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
	 *		    mutex and wait ourselves.
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
			dm_bufio_unlock(c);
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			dm_bufio_lock(c);
			if (b)
				return b;
			tried_noio_alloc = true;
		}

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		cond_resched();
	}
}

/*
 * Check if we're over the dirty watermark: if dirty buffers outnumber
 * clean buffers by more than DM_BUFIO_WRITEBACK_RATIO, start writing
 * them back asynchronously.
 */
Mikulas Patocka | 2480945 | 2013-07-10 23:41:18 +0100 | [diff] [blame] | 974 | static void __check_watermark(struct dm_bufio_client *c, |
| 975 | struct list_head *write_list) |
Mikulas Patocka | 95d402f | 2011-10-31 20:19:09 +0000 | [diff] [blame] | 976 | { |
Mikulas Patocka | b132ff3 | 2019-09-12 10:44:47 +0200 | [diff] [blame] | 977 | if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO) |
Mikulas Patocka | 2480945 | 2013-07-10 23:41:18 +0100 | [diff] [blame] | 978 | __write_dirty_buffers_async(c, 1, write_list); |
Mikulas Patocka | 95d402f | 2011-10-31 20:19:09 +0000 | [diff] [blame] | 979 | } |
| 980 | |
Mikulas Patocka | 95d402f | 2011-10-31 20:19:09 +0000 | [diff] [blame] | 981 | /*---------------------------------------------------------------- |
| 982 | * Getting a buffer |
| 983 | *--------------------------------------------------------------*/ |
| 984 | |
Mikulas Patocka | 95d402f | 2011-10-31 20:19:09 +0000 | [diff] [blame] | 985 | static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, |
Mikulas Patocka | 2480945 | 2013-07-10 23:41:18 +0100 | [diff] [blame] | 986 | enum new_flag nf, int *need_submit, |
| 987 | struct list_head *write_list) |
Mikulas Patocka | 95d402f | 2011-10-31 20:19:09 +0000 | [diff] [blame] | 988 | { |
| 989 | struct dm_buffer *b, *new_b = NULL; |
| 990 | |
| 991 | *need_submit = 0; |
| 992 | |
| 993 | b = __find(c, block); |
Mikulas Patocka | a66cc28 | 2012-03-28 18:41:29 +0100 | [diff] [blame] | 994 | if (b) |
| 995 | goto found_buffer; |
Mikulas Patocka | 95d402f | 2011-10-31 20:19:09 +0000 | [diff] [blame] | 996 | |
| 997 | if (nf == NF_GET) |
| 998 | return NULL; |
| 999 | |
Mikulas Patocka | a66cc28 | 2012-03-28 18:41:29 +0100 | [diff] [blame] | 1000 | new_b = __alloc_buffer_wait(c, nf); |
| 1001 | if (!new_b) |
| 1002 | return NULL; |
Mikulas Patocka | 95d402f | 2011-10-31 20:19:09 +0000 | [diff] [blame] | 1003 | |
| 1004 | /* |
| 1005 | * We've had a period where the mutex was unlocked, so need to |
Shenghui Wang | ef99237 | 2018-10-30 15:35:54 +0800 | [diff] [blame] | 1006 | * recheck the buffer tree. |
 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if the dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct dm_buffer *b, blk_status_t status)
{
	b->read_error = status;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read. The two operations
 * are similar, except that dm_bufio_new doesn't read the buffer from the
 * disk (it assumes that the caller overwrites all the data and uses
 * dm_bufio_mark_buffer_dirty to write the new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	if (b && b->hold_count == 1)
		buffer_record_stack(b);
#endif
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return NULL;

	if (need_submit)
		submit_io(b, REQ_OP_READ, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = blk_status_to_errno(b->read_error);

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
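
/*
 * Illustrative sketch (not part of dm-bufio itself): how a hypothetical
 * client might use the three lookup variants above. "c" is assumed to
 * come from dm_bufio_client_create(); error handling is abbreviated.
 */
static int __maybe_unused example_lookup_variants(struct dm_bufio_client *c,
						  sector_t block)
{
	struct dm_buffer *b;
	void *data;

	/* dm_bufio_get never sleeps; NULL means the block isn't cached. */
	data = dm_bufio_get(c, block, &b);
	if (!IS_ERR_OR_NULL(data)) {
		/* ... fast path: use the cached data ... */
		dm_bufio_release(b);
		return 0;
	}

	/* dm_bufio_read reads the block from disk if needed and may sleep. */
	data = dm_bufio_read(c, block, &b);
	if (IS_ERR(data))
		return PTR_ERR(data);	/* read error propagated via ERR_PTR */

	/* ... use the block_size bytes at "data" ... */
	dm_bufio_release(b);

	return 0;
}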

void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;

		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, REQ_OP_READ, read_endio);
			dm_bufio_release(b);

			cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
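
/*
 * Illustrative sketch: issuing prefetches ahead of the blocks a caller is
 * about to use, so the following dm_bufio_read calls mostly hit the cache.
 * The range is hypothetical; failed blocks are simply skipped.
 */
static void __maybe_unused example_prefetch_then_read(struct dm_bufio_client *c,
						      sector_t first, unsigned count)
{
	unsigned i;

	/* Start reads for the whole range without waiting for them. */
	dm_bufio_prefetch(c, first, count);

	for (i = 0; i < count; i++) {
		struct dm_buffer *b;
		void *data = dm_bufio_read(c, first + i, &b);

		if (IS_ERR(data))
			continue;
		/* ... consume the block ... */
		dm_bufio_release(b);
	}
}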

void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in
		 * caching an invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned start, unsigned end)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(start >= end);
	BUG_ON(end > b->c->block_size);

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state)) {
		b->dirty_start = start;
		b->dirty_end = end;
		__relink_lru(b, LIST_DIRTY);
	} else {
		if (start < b->dirty_start)
			b->dirty_start = start;
		if (end > b->dirty_end)
			b->dirty_end = end;
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush the hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked;
		 * if it exceeds the total number of buffers, it means that
		 * someone is writing buffers simultaneously with us, and we
		 * stop dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);

/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_WRITE,
		.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * Use dm-io to send a discard request to the device.
 */
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
{
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_DISCARD,
		.bi_op_flags = REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = block_to_sector(c, block),
		.count = block_to_sector(c, count),
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, we relink the
 * buffer in the buffer tree for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but do not relink it, because that other user needs to have the
 * buffer at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b, NULL);
	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		b->dirty_start = 0;
		b->dirty_end = c->block_size;
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;

		wait_on_bit_lock_io(&b->state, B_WRITING,
				    TASK_UNINTERRUPTIBLE);
		/*
		 * Relink the buffer to "new_block" so that write_callback
		 * sees "new_block" as the block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done under the bufio lock, so that the
		 * block number change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, REQ_OP_WRITE, write_endio);
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);
	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);
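
/*
 * Illustrative sketch: relocating a block, as a metadata manager might when
 * copying data to a freshly allocated location. dm_bufio_release_move drops
 * our reference itself; dm_bufio_write_dirty_buffers then ensures the data
 * actually reaches new_block. Error handling is abbreviated.
 */
static int __maybe_unused example_move_block(struct dm_bufio_client *c,
					     sector_t old_block, sector_t new_block)
{
	struct dm_buffer *b;
	void *data = dm_bufio_read(c, old_block, &b);

	if (IS_ERR(data))
		return PTR_ERR(data);

	dm_bufio_release_move(b, new_block);	/* also releases the buffer */

	return dm_bufio_write_dirty_buffers(c);
}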

static void forget_buffer_locked(struct dm_buffer *b)
{
	if (likely(!b->hold_count) && likely(!b->state)) {
		__unlink_buffer(b);
		__free_buffer_wake(b);
	}
}

/*
 * Free the given buffer.
 *
 * This is just a hint; if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	dm_bufio_lock(c);

	b = __find(c, block);
	if (b)
		forget_buffer_locked(b);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_forget);

void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
{
	struct dm_buffer *b;
	sector_t end_block = block + n_blocks;

	while (block < end_block) {
		dm_bufio_lock(c);

		b = __find_next(c, block);
		if (b) {
			block = b->block + 1;
			forget_buffer_locked(b);
		}

		dm_bufio_unlock(c);

		if (!b)
			break;
	}
}
EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
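
/*
 * Illustrative sketch: dropping cached copies of a range that is being
 * discarded, so stale data is not served from the cache afterwards. The
 * ordering here is only one possibility; dm_bufio_forget is a hint, so
 * held or dirty buffers stay cached.
 */
static int __maybe_unused example_discard_range(struct dm_bufio_client *c,
						sector_t block, sector_t n_blocks)
{
	/* Drop clean, unreferenced cached copies first. */
	dm_bufio_forget_buffers(c, block, n_blocks);

	/* Then discard the corresponding region on the device. */
	return dm_bufio_issue_discard(c, block, n_blocks);
}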

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
	c->minimum_buffers = n;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	sector_t s = bdev_nr_sectors(c->bdev);

	if (s >= c->start)
		s -= c->start;
	else
		s = 0;
	if (likely(c->sectors_per_block_bits >= 0))
		s >>= c->sectors_per_block_bits;
	else
		sector_div(s, c->block_size >> SECTOR_SHIFT);
	return s;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
{
	return c->dm_io;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;
	bool warned = false;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list) {
			WARN_ON(!warned);
			warned = true;
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
			stack_trace_print(b->stack_entries, b->stack_len, 1);
			/* mark unclaimed to avoid BUG_ON below */
			b->hold_count = 0;
#endif
		}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);
#endif

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}

/*
 * We may not be able to evict this buffer if I/O is pending or if the client
 * is still using it. The caller is expected to know that the buffer is too old.
 *
 * And if GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to a different bufio client.
 */
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
	if (!(gfp & __GFP_FS)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return false;
	}

	if (b->hold_count)
		return false;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return true;
}

static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);

	if (likely(c->sectors_per_block_bits >= 0))
		retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
	else
		retain_bytes /= c->block_size;
	return retain_bytes;
}
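
/*
 * Worked example for get_retain_buffers(), assuming 4 KiB blocks:
 * sectors_per_block_bits is __ffs(4096) - SECTOR_SHIFT = 12 - 9 = 3, so
 * retain_bytes is shifted right by 3 + 9 = 12 bits, i.e. divided by the
 * 4096-byte block size; the default of 256 KiB thus maps to 64 buffers.
 */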

static void __scan(struct dm_bufio_client *c)
{
	int l;
	struct dm_buffer *b, *tmp;
	unsigned long freed = 0;
	unsigned long count = c->n_buffers[LIST_CLEAN] +
			      c->n_buffers[LIST_DIRTY];
	unsigned long retain_target = get_retain_buffers(c);

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			if (count - freed <= retain_target)
				atomic_long_set(&c->need_shrink, 0);
			if (!atomic_long_read(&c->need_shrink))
				return;
			if (__try_evict_buffer(b, GFP_KERNEL)) {
				atomic_long_dec(&c->need_shrink);
				freed++;
			}
			cond_resched();
		}
	}
}

static void shrink_work(struct work_struct *w)
{
	struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);

	dm_bufio_lock(c);
	__scan(c);
	dm_bufio_unlock(c);
}

static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
	queue_work(dm_bufio_wq, &c->shrink_work);

	return sc->nr_to_scan;
}

static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
	unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
			      READ_ONCE(c->n_buffers[LIST_DIRTY]);
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);

	if (unlikely(count < retain_target))
		count = 0;
	else
		count -= retain_target;

	if (unlikely(count < queued_for_cleanup))
		count = 0;
	else
		count -= queued_for_cleanup;

	return count;
}

/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;
	char slab_name[27];

	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
		DMERR("%s: block size not specified or is not multiple of 512b", __func__);
		r = -EINVAL;
		goto bad_client;
	}

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->buffer_tree = RB_ROOT;

	c->bdev = bdev;
	c->block_size = block_size;
	if (is_power_of_2(block_size))
		c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
	else
		c->sectors_per_block_bits = -1;

	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	if (block_size <= KMALLOC_MAX_SIZE &&
	    (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
		unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);

		snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
						  SLAB_RECLAIM_ACCOUNT, NULL);
		if (!c->slab_cache) {
			r = -ENOMEM;
			goto bad;
		}
	}
	if (aux_size)
		snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
	else
		snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
	c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
					   0, SLAB_RECLAIM_ACCOUNT, NULL);
	if (!c->slab_buffer) {
		r = -ENOMEM;
		goto bad;
	}

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad;
		}
		__free_buffer_wake(b);
	}

	INIT_WORK(&c->shrink_work, shrink_work);
	atomic_long_set(&c->need_shrink, 0);

	c->shrinker.count_objects = dm_bufio_shrink_count;
	c->shrinker.scan_objects = dm_bufio_shrink_scan;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	r = register_shrinker(&c->shrinker);
	if (r)
		goto bad;

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	return c;

bad:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);

		list_del(&b->lru_list);
		free_buffer(b);
	}
	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	mutex_destroy(&c->lock);
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
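
/*
 * Illustrative sketch: the client lifecycle as a target constructor and
 * destructor might drive it. The 4 KiB block size, single reserved buffer
 * and absence of aux data and callbacks are arbitrary example choices.
 */
static int __maybe_unused example_client_lifecycle(struct block_device *bdev)
{
	struct dm_bufio_client *c;

	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
	if (IS_ERR(c))
		return PTR_ERR(c);

	/* ... dm_bufio_read()/dm_bufio_release() traffic goes here ... */

	/* All buffers must be released before the client is destroyed. */
	dm_bufio_client_destroy(c);

	return 0;
}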

/*
 * Free the buffering interface.
 * It is required that there are no references to any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);
	flush_work(&c->shrink_work);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);

		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
	mutex_destroy(&c->lock);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
{
	c->start = start;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);

static unsigned get_max_age_hz(void)
{
	unsigned max_age = READ_ONCE(dm_bufio_max_age);

	if (max_age > UINT_MAX / HZ)
		max_age = UINT_MAX / HZ;

	return max_age * HZ;
}

static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
	return time_after_eq(jiffies, b->last_accessed + age_hz);
}

static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
	struct dm_buffer *b, *tmp;
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long count;
	LIST_HEAD(write_list);

	dm_bufio_lock(c);

	__check_watermark(c, &write_list);
	if (unlikely(!list_empty(&write_list))) {
		dm_bufio_unlock(c);
		__flush_write_list(&write_list);
		dm_bufio_lock(c);
	}

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
		if (count <= retain_target)
			break;

		if (!older_than(b, age_hz))
			break;

		if (__try_evict_buffer(b, 0))
			count--;

		cond_resched();
	}

	dm_bufio_unlock(c);
}

static void do_global_cleanup(struct work_struct *w)
{
	struct dm_bufio_client *locked_client = NULL;
	struct dm_bufio_client *current_client;
	struct dm_buffer *b;
	unsigned spinlock_hold_count;
	unsigned long threshold = dm_bufio_cache_size -
		dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
	unsigned long loops = global_num * 2;

	mutex_lock(&dm_bufio_clients_lock);

	while (1) {
		cond_resched();

		spin_lock(&global_spinlock);
		if (unlikely(dm_bufio_current_allocated <= threshold))
			break;

		spinlock_hold_count = 0;
get_next:
		if (!loops--)
			break;
		if (unlikely(list_empty(&global_queue)))
			break;
		b = list_entry(global_queue.prev, struct dm_buffer, global_list);

		if (b->accessed) {
			b->accessed = 0;
			list_move(&b->global_list, &global_queue);
			if (likely(++spinlock_hold_count < 16))
				goto get_next;
			spin_unlock(&global_spinlock);
			continue;
		}

		current_client = b->c;
		if (unlikely(current_client != locked_client)) {
			if (locked_client)
				dm_bufio_unlock(locked_client);

			if (!dm_bufio_trylock(current_client)) {
				spin_unlock(&global_spinlock);
				dm_bufio_lock(current_client);
				locked_client = current_client;
				continue;
			}

			locked_client = current_client;
		}

		spin_unlock(&global_spinlock);

		if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
			spin_lock(&global_spinlock);
			list_move(&b->global_list, &global_queue);
			spin_unlock(&global_spinlock);
		}
	}

	spin_unlock(&global_spinlock);

	if (locked_client)
		dm_bufio_unlock(locked_client);

	mutex_unlock(&dm_bufio_clients_lock);
}

static void cleanup_old_buffers(void)
{
	unsigned long max_age_hz = get_max_age_hz();
	struct dm_bufio_client *c;

	mutex_lock(&dm_bufio_clients_lock);

	__cache_size_refresh();

	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
		__evict_old_buffers(c, max_age_hz);

	mutex_unlock(&dm_bufio_clients_lock);
}

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes the memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
	INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;

	cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
	destroy_workqueue(dm_bufio_wq);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	BUG_ON(bug);
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");