/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS	1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structure that is used directly by dm.c and dm-rq.c
 * DM targets must _not_ dereference a mapped_device to directly access its members!
 */
struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

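	/*
	 * Illustrative sketch (not part of this header): callers typically
	 * dereference 'map' through the SRCU-protected helpers named in the
	 * comment above, e.g.
	 *
	 *	int srcu_idx;
	 *	struct dm_table *table = dm_get_live_table(md, &srcu_idx);
	 *
	 *	if (table)
	 *		... use the live table ...
	 *	dm_put_live_table(md, srcu_idx);
	 */
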
	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	wait_queue_head_t wait;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct block_device *bdev;

	struct dm_stats stats;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool init_tio_pdu:1;

	struct srcu_struct io_barrier;
};

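/*
 * Completion-path helpers: if a device turns out not to honor an
 * operation it advertised (discard, write same, write zeroes), these
 * disable that operation on the mapped device so it is not retried.
 */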
void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

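/*
 * Example use (a sketch of the pattern in dm-sysfs.c): the kobject
 * release callback signals 'completion' so teardown can wait for the
 * last sysfs reference to drop:
 *
 *	static void dm_kobject_release(struct kobject *kobj)
 *	{
 *		complete(dm_get_completion_from_kobject(kobj));
 *	}
 */
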
unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);

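/*
 * Example use (sketch, after the reserved-IO accessors in dm.c): read a
 * user-settable module parameter while clamping it to a sane range, e.g.
 *
 *	return __dm_get_module_param(&reserved_bio_based_ios,
 *				     RESERVED_BIO_BASED_IOS,
 *				     DM_RESERVED_MAX_IOS);
 */
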
static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}

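/*
 * Example use (hypothetical emit loop): stop appending to a message
 * buffer once it has no room left beyond the terminating NUL:
 *
 *	sz += scnprintf(result + sz, maxlen - sz, "%u ", value);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		return 1;
 */
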
extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

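/*
 * A minimal sketch of what issuing a global event amounts to (see the
 * definition in dm.c): bump the global event counter and wake any pollers:
 *
 *	atomic_inc(&dm_global_event_nr);
 *	wake_up(&dm_global_eventq);
 */
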
#endif