/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-block-manager.h"
#include "dm-persistent-data-internal.h"

#include <linux/dm-bufio.h>
#include <linux/crc32c.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/device-mapper.h>
#include <linux/stacktrace.h>
#include <linux/sched/task.h>

#define DM_MSG_PREFIX "block manager"

/*----------------------------------------------------------------*/

#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING

/*
 * This is a read/write semaphore with a couple of differences.
 *
 * i) There is a restriction on the number of concurrent read locks that
 * may be held at once. This is just an implementation detail.
 *
 * ii) Recursive locking attempts are detected and return -EINVAL. A stack
 * trace is also emitted for the previous lock acquisition.
 *
 * iii) Priority is given to write locks.
 */
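
/*
 * Illustrative sketch (not compiled): the point of this debug lock is to
 * turn a self-deadlock into an error return. A task that already holds a
 * block and tries to take it again sees:
 *
 *	r = bl_down_read(&lock);	// first acquisition: returns 0
 *	r = bl_down_read(&lock);	// same task again: returns -EINVAL,
 *					// and both stack traces are logged
 */
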
#define MAX_HOLDERS 4
#define MAX_STACK 10

struct stack_store {
	unsigned int nr_entries;
	unsigned long entries[MAX_STACK];
};

struct block_lock {
	spinlock_t lock;
	__s32 count;
	struct list_head waiters;
	struct task_struct *holders[MAX_HOLDERS];

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	struct stack_store traces[MAX_HOLDERS];
#endif
};

struct waiter {
	struct list_head list;
	struct task_struct *task;
	int wants_write;
};

static unsigned __find_holder(struct block_lock *lock,
			      struct task_struct *task)
{
	unsigned i;

	for (i = 0; i < MAX_HOLDERS; i++)
		if (lock->holders[i] == task)
			break;

	BUG_ON(i == MAX_HOLDERS);
	return i;
}

/* call this *after* you increment lock->count */
static void __add_holder(struct block_lock *lock, struct task_struct *task)
{
	unsigned h = __find_holder(lock, NULL);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	struct stack_store *t;
#endif

	get_task_struct(task);
	lock->holders[h] = task;

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	t = lock->traces + h;
	t->nr_entries = stack_trace_save(t->entries, MAX_STACK, 2);
#endif
}

/* call this *before* you decrement lock->count */
static void __del_holder(struct block_lock *lock, struct task_struct *task)
{
	unsigned h = __find_holder(lock, task);
	lock->holders[h] = NULL;
	put_task_struct(task);
}

static int __check_holder(struct block_lock *lock)
{
	unsigned i;

	for (i = 0; i < MAX_HOLDERS; i++) {
		if (lock->holders[i] == current) {
			DMERR("recursive lock detected in metadata");
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
			DMERR("previously held here:");
			stack_trace_print(lock->traces[i].entries,
					  lock->traces[i].nr_entries, 4);

			DMERR("subsequent acquisition attempted here:");
			dump_stack();
#endif
			return -EINVAL;
		}
	}

	return 0;
}

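/*
 * __wait() and __wake_waiter() hand off through w->task: the waiter spins
 * in TASK_UNINTERRUPTIBLE until the waker clears w->task. The smp_mb() in
 * __wake_waiter() orders the list_del() before that clear, so once the
 * waiter observes w->task == NULL its struct waiter (which lives on the
 * waiter's stack) is no longer referenced and may safely go out of scope.
 */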
static void __wait(struct waiter *w)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!w->task)
			break;

		schedule();
	}

	set_current_state(TASK_RUNNING);
}

static void __wake_waiter(struct waiter *w)
{
	struct task_struct *task;

	list_del(&w->list);
	task = w->task;
	smp_mb();
	w->task = NULL;
	wake_up_process(task);
}

/*
 * We either wake a few readers or a single writer.
 */
static void __wake_many(struct block_lock *lock)
{
	struct waiter *w, *tmp;

	BUG_ON(lock->count < 0);
	list_for_each_entry_safe(w, tmp, &lock->waiters, list) {
		if (lock->count >= MAX_HOLDERS)
			return;

		if (w->wants_write) {
			if (lock->count > 0)
				return; /* still read locked */

			lock->count = -1;
			__add_holder(lock, w->task);
			__wake_waiter(w);
			return;
		}

		lock->count++;
		__add_holder(lock, w->task);
		__wake_waiter(w);
	}
}

static void bl_init(struct block_lock *lock)
{
	int i;

	spin_lock_init(&lock->lock);
	lock->count = 0;
	INIT_LIST_HEAD(&lock->waiters);
	for (i = 0; i < MAX_HOLDERS; i++)
		lock->holders[i] = NULL;
}

static int __available_for_read(struct block_lock *lock)
{
	return lock->count >= 0 &&
		lock->count < MAX_HOLDERS &&
		list_empty(&lock->waiters);
}

static int bl_down_read(struct block_lock *lock)
{
	int r;
	struct waiter w;

	spin_lock(&lock->lock);
	r = __check_holder(lock);
	if (r) {
		spin_unlock(&lock->lock);
		return r;
	}

	if (__available_for_read(lock)) {
		lock->count++;
		__add_holder(lock, current);
		spin_unlock(&lock->lock);
		return 0;
	}

	get_task_struct(current);

	w.task = current;
	w.wants_write = 0;
	list_add_tail(&w.list, &lock->waiters);
	spin_unlock(&lock->lock);

	__wait(&w);
	put_task_struct(current);
	return 0;
}

static int bl_down_read_nonblock(struct block_lock *lock)
{
	int r;

	spin_lock(&lock->lock);
	r = __check_holder(lock);
	if (r)
		goto out;

	if (__available_for_read(lock)) {
		lock->count++;
		__add_holder(lock, current);
		r = 0;
	} else
		r = -EWOULDBLOCK;

out:
	spin_unlock(&lock->lock);
	return r;
}

static void bl_up_read(struct block_lock *lock)
{
	spin_lock(&lock->lock);
	BUG_ON(lock->count <= 0);
	__del_holder(lock, current);
	--lock->count;
	if (!list_empty(&lock->waiters))
		__wake_many(lock);
	spin_unlock(&lock->lock);
}

static int bl_down_write(struct block_lock *lock)
{
	int r;
	struct waiter w;

	spin_lock(&lock->lock);
	r = __check_holder(lock);
	if (r) {
		spin_unlock(&lock->lock);
		return r;
	}

	if (lock->count == 0 && list_empty(&lock->waiters)) {
		lock->count = -1;
		__add_holder(lock, current);
		spin_unlock(&lock->lock);
		return 0;
	}

	get_task_struct(current);
	w.task = current;
	w.wants_write = 1;

	/*
	 * Writers are given priority: we add at the head of the waiter
	 * list rather than the tail. We know there's only one mutator
	 * in the system, so the resulting ordering reversal among
	 * writers doesn't matter.
	 */
	list_add(&w.list, &lock->waiters);
	spin_unlock(&lock->lock);

	__wait(&w);
	put_task_struct(current);

	return 0;
}

static void bl_up_write(struct block_lock *lock)
{
	spin_lock(&lock->lock);
	__del_holder(lock, current);
	lock->count = 0;
	if (!list_empty(&lock->waiters))
		__wake_many(lock);
	spin_unlock(&lock->lock);
}

static void report_recursive_bug(dm_block_t b, int r)
{
	if (r == -EINVAL)
		DMERR("recursive acquisition of block %llu requested.",
		      (unsigned long long) b);
}

#else /* !CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */

#define bl_init(x) do { } while (0)
#define bl_down_read(x) 0
#define bl_down_read_nonblock(x) 0
#define bl_up_read(x) do { } while (0)
#define bl_down_write(x) 0
#define bl_up_write(x) do { } while (0)
#define report_recursive_bug(x, y) do { } while (0)

#endif /* CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */

/*----------------------------------------------------------------*/

/*
 * Block manager is currently implemented using dm-bufio. struct
 * dm_block_manager and struct dm_block map directly onto a couple of
 * structs in the bufio interface. I want to retain the freedom to move
 * away from bufio in the future. So these structs are just cast within
 * this .c file, rather than making it through to the public interface.
 */
static struct dm_buffer *to_buffer(struct dm_block *b)
{
	return (struct dm_buffer *) b;
}

dm_block_t dm_block_location(struct dm_block *b)
{
	return dm_bufio_get_block_number(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_block_location);

void *dm_block_data(struct dm_block *b)
{
	return dm_bufio_get_block_data(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_block_data);

struct buffer_aux {
	struct dm_block_validator *validator;
	int write_locked;

#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
	struct block_lock lock;
#endif
};

static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);

	aux->validator = NULL;
	bl_init(&aux->lock);
}

static void dm_block_manager_write_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);

	if (aux->validator) {
		aux->validator->prepare_for_write(aux->validator, (struct dm_block *) buf,
						  dm_bufio_get_block_size(dm_bufio_get_client(buf)));
	}
}

/*----------------------------------------------------------------
 * Public interface
 *--------------------------------------------------------------*/
struct dm_block_manager {
	struct dm_bufio_client *bufio;
	bool read_only:1;
};

struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
						 unsigned block_size,
						 unsigned max_held_per_thread)
{
	int r;
	struct dm_block_manager *bm;

	bm = kmalloc(sizeof(*bm), GFP_KERNEL);
	if (!bm) {
		r = -ENOMEM;
		goto bad;
	}

	bm->bufio = dm_bufio_client_create(bdev, block_size, max_held_per_thread,
					   sizeof(struct buffer_aux),
					   dm_block_manager_alloc_callback,
					   dm_block_manager_write_callback);
	if (IS_ERR(bm->bufio)) {
		r = PTR_ERR(bm->bufio);
		kfree(bm);
		goto bad;
	}

	bm->read_only = false;

	return bm;

bad:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_block_manager_create);
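
/*
 * Example use (illustrative only; the block size and per-thread hold
 * count are hypothetical values a client like dm-thin might pick):
 *
 *	struct dm_block_manager *bm;
 *
 *	bm = dm_block_manager_create(bdev, 4096, 1);
 *	if (IS_ERR(bm))
 *		return PTR_ERR(bm);
 *	...
 *	dm_block_manager_destroy(bm);
 */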

void dm_block_manager_destroy(struct dm_block_manager *bm)
{
	dm_bufio_client_destroy(bm->bufio);
	kfree(bm);
}
EXPORT_SYMBOL_GPL(dm_block_manager_destroy);

unsigned dm_bm_block_size(struct dm_block_manager *bm)
{
	return dm_bufio_get_block_size(bm->bufio);
}
EXPORT_SYMBOL_GPL(dm_bm_block_size);

dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm)
{
	return dm_bufio_get_device_size(bm->bufio);
}

static int dm_bm_validate_buffer(struct dm_block_manager *bm,
				 struct dm_buffer *buf,
				 struct buffer_aux *aux,
				 struct dm_block_validator *v)
{
	if (unlikely(!aux->validator)) {
		int r;

		if (!v)
			return 0;

		r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio));
		if (unlikely(r)) {
			DMERR_LIMIT("%s validator check failed for block %llu", v->name,
				    (unsigned long long) dm_bufio_get_block_number(buf));
			return r;
		}
		aux->validator = v;
	} else {
		if (unlikely(aux->validator != v)) {
			DMERR_LIMIT("validator mismatch (old=%s vs new=%s) for block %llu",
				    aux->validator->name, v ? v->name : "NULL",
				    (unsigned long long) dm_bufio_get_block_number(buf));
			return -EINVAL;
		}
	}

	return 0;
}

int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
		    struct dm_block_validator *v,
		    struct dm_block **result)
{
	struct buffer_aux *aux;
	void *p;
	int r;

	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
	if (IS_ERR(p))
		return PTR_ERR(p);

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_read(&aux->lock);
	if (unlikely(r)) {
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}

	aux->write_locked = 0;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		bl_up_read(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_read_lock);
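
/*
 * Sketch of the read side (illustrative only; loc, v and copy are
 * hypothetical caller variables): the lock call both reads the block
 * through bufio and takes the debug read lock, and dm_bm_unlock()
 * drops both again.
 *
 *	struct dm_block *blk;
 *	int r;
 *
 *	r = dm_bm_read_lock(bm, loc, v, &blk);
 *	if (r)
 *		return r;
 *	// only touch dm_block_data(blk) while the lock is held
 *	memcpy(copy, dm_block_data(blk), dm_bm_block_size(bm));
 *	dm_bm_unlock(blk);
 */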

int dm_bm_write_lock(struct dm_block_manager *bm,
		     dm_block_t b, struct dm_block_validator *v,
		     struct dm_block **result)
{
	struct buffer_aux *aux;
	void *p;
	int r;

	if (dm_bm_is_read_only(bm))
		return -EPERM;

	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
	if (IS_ERR(p))
		return PTR_ERR(p);

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_write(&aux->lock);
	if (r) {
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}

	aux->write_locked = 1;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		bl_up_write(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_write_lock);
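
/*
 * Sketch of the write side (illustrative only; loc, v and new_contents
 * are hypothetical): a successful write lock gives exclusive access,
 * and dm_bm_unlock() on a write-locked block marks the buffer dirty so
 * the change reaches disk on the next flush.
 *
 *	struct dm_block *blk;
 *	int r;
 *
 *	r = dm_bm_write_lock(bm, loc, v, &blk);
 *	if (r)
 *		return r;	// -EPERM if the manager is read-only
 *	memcpy(dm_block_data(blk), new_contents, dm_bm_block_size(bm));
 *	dm_bm_unlock(blk);
 */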

int dm_bm_read_try_lock(struct dm_block_manager *bm,
			dm_block_t b, struct dm_block_validator *v,
			struct dm_block **result)
{
	struct buffer_aux *aux;
	void *p;
	int r;

	p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (unlikely(!p))
		return -EWOULDBLOCK;

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_read_nonblock(&aux->lock);
	if (r < 0) {
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}
	aux->write_locked = 0;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		bl_up_read(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}

int dm_bm_write_lock_zero(struct dm_block_manager *bm,
			  dm_block_t b, struct dm_block_validator *v,
			  struct dm_block **result)
{
	int r;
	struct buffer_aux *aux;
	void *p;

	if (dm_bm_is_read_only(bm))
		return -EPERM;

	p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
	if (IS_ERR(p))
		return PTR_ERR(p);

	memset(p, 0, dm_bm_block_size(bm));

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_write(&aux->lock);
	if (r) {
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	aux->write_locked = 1;
	aux->validator = v;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_write_lock_zero);

void dm_bm_unlock(struct dm_block *b)
{
	struct buffer_aux *aux;

	aux = dm_bufio_get_aux_data(to_buffer(b));

	if (aux->write_locked) {
		dm_bufio_mark_buffer_dirty(to_buffer(b));
		bl_up_write(&aux->lock);
	} else
		bl_up_read(&aux->lock);

	dm_bufio_release(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_bm_unlock);

int dm_bm_flush(struct dm_block_manager *bm)
{
	if (dm_bm_is_read_only(bm))
		return -EPERM;

	return dm_bufio_write_dirty_buffers(bm->bufio);
}
EXPORT_SYMBOL_GPL(dm_bm_flush);
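
/*
 * Callers that need crash consistency (e.g. the transaction manager
 * layered on top of this) typically unlock all their blocks, call
 * dm_bm_flush() to push the dirty buffers out, and only then rewrite
 * the superblock that references the new blocks. Illustrative ordering
 * only:
 *
 *	dm_bm_unlock(last_changed_block);
 *	r = dm_bm_flush(bm);		// all changed blocks now on disk
 *	if (r)
 *		return r;
 *	// now it is safe to publish them from the superblock
 */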

void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
{
	dm_bufio_prefetch(bm->bufio, b, 1);
}

bool dm_bm_is_read_only(struct dm_block_manager *bm)
{
	return bm ? bm->read_only : true;
}
EXPORT_SYMBOL_GPL(dm_bm_is_read_only);

void dm_bm_set_read_only(struct dm_block_manager *bm)
{
	if (bm)
		bm->read_only = true;
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_only);

void dm_bm_set_read_write(struct dm_block_manager *bm)
{
	if (bm)
		bm->read_only = false;
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_write);

u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
{
	return crc32c(~(u32) 0, data, len) ^ init_xor;
}
EXPORT_SYMBOL_GPL(dm_bm_checksum);
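
/*
 * Validators typically use this for their checksum field: crc32c over
 * the rest of the block, xor'd with a per-block-type seed so a block of
 * one type never validates as another. Hypothetical sketch (the struct
 * layout and MY_TYPE_XOR constant are made up):
 *
 *	struct disk_header {
 *		__le32 csum;
 *		__le64 blocknr;
 *	} *h = dm_block_data(b);
 *
 *	h->csum = cpu_to_le32(dm_bm_checksum(&h->blocknr,
 *					     block_size - sizeof(__le32),
 *					     MY_TYPE_XOR));
 */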

/*----------------------------------------------------------------*/

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_DESCRIPTION("Immutable metadata library for dm");

/*----------------------------------------------------------------*/