// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

#define NBD_RT_TIMEDOUT			0
#define NBD_RT_DISCONNECT_REQUESTED	1
#define NBD_RT_DISCONNECTED		2
#define NBD_RT_HAS_PID_FILE		3
#define NBD_RT_HAS_CONFIG_REF		4
#define NBD_RT_BOUND			5
#define NBD_RT_DESTROY_ON_DISCONNECT	6
#define NBD_RT_DISCONNECT_ON_CLOSE	7

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;
	struct workqueue_struct *recv_workq;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;
};

#define NBD_CMD_REQUEUED	1

struct nbd_cmd {
	struct nbd_device *nbd;
	struct mutex lock;
	int index;
	int cookie;
	int retries;
	blk_status_t status;
	unsigned long flags;
	u32 cmd_cookie;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

#define NBD_DEF_BLKSIZE 1024

static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
		blk_mq_requeue_request(req, true);
}

#define NBD_COOKIE_BITS 32

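/*
 * The 64-bit handle sent on the wire packs the per-command cmd_cookie into
 * the upper 32 bits and the blk-mq unique tag into the lower 32 bits. The
 * cookie lets a reply be matched to the exact submission and lets stale
 * replies (e.g. after a timeout and requeue) be rejected.
 */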
static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	u32 tag = blk_mq_unique_tag(req);
	u64 cookie = cmd->cmd_cookie;

	return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
	return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> NBD_COOKIE_BITS);
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = 0444},
	.show = pid_show,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	struct request_queue *q;

	if (disk) {
		q = disk->queue;
		del_gendisk(disk);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&nbd->tag_set);
		disk->private_data = NULL;
		put_disk(disk);
	}
	kfree(nbd);
}

static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		mutex_unlock(&nbd_index_mutex);
		nbd_dev_remove(nbd);
	}
}

static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}

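/*
 * Mark one connection dead. If @notify is set and the device is not already
 * being disconnected, queue the link-dead worker for this device index so
 * the failure can be reported. When the last live connection drops after a
 * user-requested disconnect, the device is flipped to NBD_RT_DISCONNECTED.
 */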
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		if (atomic_dec_return(&nbd->config->live_connections) == 0) {
			if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
					       &nbd->config->runtime_flags)) {
				set_bit(NBD_RT_DISCONNECTED,
					&nbd->config->runtime_flags);
				dev_info(nbd_to_dev(nbd),
					"Disconnected due to user request.\n");
			}
		}
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}

static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}

static void nbd_size_update(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct block_device *bdev = bdget_disk(nbd->disk, 0);

	if (config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = config->blksize;
		nbd->disk->queue->limits.discard_alignment = config->blksize;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
	}
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, config->bytesize >> 9);
	if (bdev) {
		if (bdev->bd_disk) {
			bd_set_size(bdev, config->bytesize);
			set_blocksize(bdev, config->blksize);
		} else
			bdev->bd_invalidated = 1;
		bdput(bdev);
	}
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	if (nbd->task_recv != NULL)
		nbd_size_update(nbd);
}

static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}

/*
 * Forcibly shut down the sockets, causing all listeners to error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

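/* Map a block layer request operation to the matching NBD wire command. */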
static u32 req_to_nbd_cmd_type(struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		return NBD_CMD_TRIM;
	case REQ_OP_FLUSH:
		return NBD_CMD_FLUSH;
	case REQ_OP_WRITE:
		return NBD_CMD_WRITE;
	case REQ_OP_READ:
		return NBD_CMD_READ;
	default:
		return U32_MAX;
	}
}

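/*
 * blk-mq timeout handler. With more than one live connection the command is
 * requeued onto another socket; with timeout=0 the timer is simply reset;
 * otherwise the request is failed and all sockets are shut down.
 */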
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		goto done;
	}
	config = nbd->config;

	if (!mutex_trylock(&cmd->lock)) {
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}

	if (config->num_connections > 1) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying (%d/%d alive)\n",
				    atomic_read(&config->live_connections),
				    config->num_connections);
		/*
		 * We have other connections, so requeue this I/O; the submit
		 * path will put it on a live connection.
		 */
		if (config->socks && config->num_connections > 1) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			mutex_unlock(&cmd->lock);
			nbd_requeue_cmd(cmd);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
	}

	if (!nbd->tag_set.timeout) {
		/*
		 * Userspace sets timeout=0 to disable socket disconnection,
		 * so just warn and reset the timer.
		 */
		cmd->retries++;
		dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
			req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
			(unsigned long long)blk_rq_pos(req) << 9,
			blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);

		mutex_unlock(&cmd->lock);
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}

	dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
	set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);
	sock_shutdown(nbd);
	nbd_config_put(nbd);
done:
	blk_mq_complete_request(req);
	return BLK_EH_DONE;
}

/*
 * Send or receive a packet, looping until the iov_iter is drained. When
 * @sent is non-NULL it is advanced by the number of bytes transferred so
 * that a partial send can be resumed later.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}

/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg; treat both as an
 * interrupted transfer.
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}

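/*
 * Build and transmit the NBD request header for @cmd, followed by the bio
 * payload for writes. A partial transfer is recorded in nsock->pending and
 * nsock->sent so that a later call can resume where it left off.
 */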
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u64 handle;
	u32 type;
	u32 nbd_cmd_flags = 0;
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));

	type = req_to_nbd_cmd_type(req);
	if (type == U32_MAX)
		return -EIO;

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);

			/* initialize handle for tracing purposes */
			handle = nbd_cmd_handle(cmd);

			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	} else {
		cmd->cmd_cookie++;
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	cmd->retries = 0;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	handle = nbd_cmd_handle(cmd);
	memcpy(request.handle, &handle, sizeof(handle));

	trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	trace_nbd_header_sent(req, handle);
	if (result <= 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return
			 * BLK_STS_RESOURCE; however, if we have sent
			 * something we need to make sure we only allow this
			 * req to be sent until we are completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending
					 * and return BLK_STS_RESOURCE.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	trace_nbd_payload_sent(req, handle);
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}

/* Returns the completed nbd_cmd, or an ERR_PTR if something went wrong. */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u64 handle;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;
	int ret = 0;

	reply.magic = 0;
	iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&handle, reply.handle, sizeof(handle));
	tag = nbd_handle_to_tag(handle);
	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	trace_nbd_header_received(req, handle);
	cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
		ret = -ENOENT;
		goto out;
	}
	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		goto out;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected or we only have 1
				 * connection then we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config) ||
				    config->num_connections <= 1) {
					cmd->status = BLK_STS_IOERR;
					goto out;
				}
				ret = -EIO;
				goto out;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
out:
	trace_nbd_payload_received(req, handle);
	mutex_unlock(&cmd->lock);
	return ret ? ERR_PTR(ret) : cmd;
}

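/*
 * Per-connection receive worker: keeps reading replies off one socket and
 * completing the matching requests until the socket dies or the device is
 * torn down.
 */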
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
	}
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	nbd_config_put(nbd);
	kfree(args);
}

static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	cmd->status = BLK_STS_IOERR;
	blk_mq_complete_request(req);
	return true;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

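/*
 * Pick another live connection to send on when the socket at @index is dead.
 * Returns the index of a usable connection, or -1 if none is available.
 */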
static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}

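/*
 * Wait up to dead_conn_timeout for a connection to come back before giving
 * up on a request. Returns non-zero if a live connection became available.
 */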
static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return 0;
	return wait_event_timeout(config->conn_wait,
				  atomic_read(&config->live_connections) > 0,
				  config->dead_conn_timeout) > 0;
}

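/*
 * Route one command to a socket: fall back to another live connection if the
 * chosen one is dead, optionally wait for a reconnect, and then send the
 * request under the socket's tx_lock.
 */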
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		blk_mq_start_request(req);
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		blk_mq_start_request(req);
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			blk_mq_start_request(req);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case where we have a pending request that was partially
	 * transmitted and _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		nbd_requeue_cmd(cmd);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, requeueing\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		nbd_requeue_cmd(cmd);
		ret = 0;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}

static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bios to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (i.e. we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	mutex_lock(&cmd->lock);
	clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending, so our sendmsg may fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_STS_IOERR;
	else if (!ret)
		ret = BLK_STS_OK;
	mutex_unlock(&cmd->lock);

	return ret;
}

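/*
 * Attach a new socket (passed in as a file descriptor from userspace) to the
 * device, growing the socks array. On the ioctl path only the task that did
 * the original setup may add more sockets; netlink callers bypass that check.
 */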
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_RT_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		sockfd_put(sock);
		return -EBUSY;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		sockfd_put(sock);
		return -ENOMEM;
	}
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);

	return 0;
}

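/*
 * Swap a freshly passed-in socket into the first dead connection slot and
 * queue a new receive worker for it, bumping the socket cookie so stale
 * commands are not matched against the new connection.
 */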
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_lock in an error path in recv_work, so we
		 * need to queue_work outside of the tx_lock.
		 */
		queue_work(nbd->recv_workq, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_size(bdev, 0);
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	}
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

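/*
 * Send an NBD_CMD_DISC request on every connection to ask the server to drop
 * the sockets cleanly.
 */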
static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}

static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
	send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}

Josef Bacik5ea8d102017-04-06 17:01:58 -04001139static void nbd_config_put(struct nbd_device *nbd)
1140{
1141 if (refcount_dec_and_mutex_lock(&nbd->config_refs,
1142 &nbd->config_lock)) {
Josef Bacik5ea8d102017-04-06 17:01:58 -04001143 struct nbd_config *config = nbd->config;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001144 nbd_dev_dbg_close(nbd);
Josef Bacik29eaadc2017-04-06 17:01:59 -04001145 nbd_size_clear(nbd);
Xiubo Liec76a7b2019-09-17 17:26:05 +05301146 if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001147 &config->runtime_flags))
1148 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
1149 nbd->task_recv = NULL;
Josef Bacik29eaadc2017-04-06 17:01:59 -04001150 nbd_clear_sock(nbd);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001151 if (config->num_connections) {
1152 int i;
1153 for (i = 0; i < config->num_connections; i++) {
1154 sockfd_put(config->socks[i]->sock);
1155 kfree(config->socks[i]);
1156 }
1157 kfree(config->socks);
1158 }
Ilya Dryomovfa976532017-05-23 17:49:55 +02001159 kfree(nbd->config);
Ilya Dryomovaf622b82017-05-23 17:49:54 +02001160 nbd->config = NULL;
1161
Mike Christiee9e006f2019-08-04 14:10:06 -05001162 if (nbd->recv_workq)
1163 destroy_workqueue(nbd->recv_workq);
1164 nbd->recv_workq = NULL;
1165
Ilya Dryomovaf622b82017-05-23 17:49:54 +02001166 nbd->tag_set.timeout = 0;
Josef Bacik6df133a2018-05-23 13:35:59 -04001167 nbd->disk->queue->limits.discard_granularity = 0;
Josef Bacik07ce2132018-06-05 11:41:23 -04001168 nbd->disk->queue->limits.discard_alignment = 0;
Josef Bacik6df133a2018-05-23 13:35:59 -04001169 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
Bart Van Assche8b904b52018-03-07 17:10:10 -08001170 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
Josef Bacika2c97902017-04-06 17:02:07 -04001171
Josef Bacik5ea8d102017-04-06 17:01:58 -04001172 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001173 nbd_put(nbd);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001174 module_put(THIS_MODULE);
1175 }
1176}
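
nbd_config_put() leans on refcount_dec_and_mutex_lock(): only the caller that drops the final reference sees it return true, and that caller returns with config_lock already held, so exactly one thread runs the teardown above. A minimal sketch of the pattern, with a hypothetical struct foo standing in for struct nbd_device:

#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo {
	refcount_t refs;
	struct mutex lock;
	void *cfg;		/* resource guarded by @lock */
};

static void foo_put(struct foo *f)
{
	/* True only for the final reference; returns with f->lock held. */
	if (refcount_dec_and_mutex_lock(&f->refs, &f->lock)) {
		kfree(f->cfg);
		f->cfg = NULL;
		mutex_unlock(&f->lock);
	}
}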
1177
Josef Bacike46c7282017-04-06 17:02:00 -04001178static int nbd_start_device(struct nbd_device *nbd)
Josef Bacik9442b732017-02-07 17:10:22 -05001179{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001180 struct nbd_config *config = nbd->config;
1181 int num_connections = config->num_connections;
Josef Bacik9442b732017-02-07 17:10:22 -05001182 int error = 0, i;
1183
1184 if (nbd->task_recv)
1185 return -EBUSY;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001186 if (!config->socks)
Josef Bacik9442b732017-02-07 17:10:22 -05001187 return -EINVAL;
1188 if (num_connections > 1 &&
Josef Bacik5ea8d102017-04-06 17:01:58 -04001189 !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
Josef Bacik9442b732017-02-07 17:10:22 -05001190 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
Josef Bacik5ea8d102017-04-06 17:01:58 -04001191 return -EINVAL;
Josef Bacik9442b732017-02-07 17:10:22 -05001192 }
1193
Mike Christiee9e006f2019-08-04 14:10:06 -05001194 nbd->recv_workq = alloc_workqueue("knbd%d-recv",
1195 WQ_MEM_RECLAIM | WQ_HIGHPRI |
1196 WQ_UNBOUND, 0, nbd->index);
1197 if (!nbd->recv_workq) {
1198 dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
1199 return -ENOMEM;
1200 }
1201
Josef Bacik5ea8d102017-04-06 17:01:58 -04001202 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
Josef Bacik9442b732017-02-07 17:10:22 -05001203 nbd->task_recv = current;
Josef Bacik9442b732017-02-07 17:10:22 -05001204
Josef Bacik29eaadc2017-04-06 17:01:59 -04001205 nbd_parse_flags(nbd);
Josef Bacik9442b732017-02-07 17:10:22 -05001206
1207 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
1208 if (error) {
1209 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
Josef Bacik5ea8d102017-04-06 17:01:58 -04001210 return error;
Josef Bacik9442b732017-02-07 17:10:22 -05001211 }
Xiubo Liec76a7b2019-09-17 17:26:05 +05301212 set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
Josef Bacik9442b732017-02-07 17:10:22 -05001213
1214 nbd_dev_dbg_init(nbd);
1215 for (i = 0; i < num_connections; i++) {
Josef Bacik5ea8d102017-04-06 17:01:58 -04001216 struct recv_thread_args *args;
1217
1218 args = kzalloc(sizeof(*args), GFP_KERNEL);
1219 if (!args) {
1220 sock_shutdown(nbd);
1221 return -ENOMEM;
1222 }
1223 sk_set_memalloc(config->socks[i]->sock->sk);
Josef Bacika7ee8cf2017-07-21 10:48:15 -04001224 if (nbd->tag_set.timeout)
1225 config->socks[i]->sock->sk->sk_sndtimeo =
1226 nbd->tag_set.timeout;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001227 atomic_inc(&config->recv_threads);
1228 refcount_inc(&nbd->config_refs);
1229 INIT_WORK(&args->work, recv_work);
1230 args->nbd = nbd;
1231 args->index = i;
Mike Christiee9e006f2019-08-04 14:10:06 -05001232 queue_work(nbd->recv_workq, &args->work);
Josef Bacik9442b732017-02-07 17:10:22 -05001233 }
Josef Bacik639812a2017-10-09 13:12:10 -04001234 nbd_size_update(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001235 return error;
1236}
1237
1238static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
1239{
1240 struct nbd_config *config = nbd->config;
1241 int ret;
1242
1243 ret = nbd_start_device(nbd);
1244 if (ret)
1245 return ret;
1246
Josef Bacike46c7282017-04-06 17:02:00 -04001247 if (max_part)
1248 bdev->bd_invalidated = 1;
1249 mutex_unlock(&nbd->config_lock);
1250 ret = wait_event_interruptible(config->recv_wq,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001251 atomic_read(&config->recv_threads) == 0);
Mike Christiee9e006f2019-08-04 14:10:06 -05001252 if (ret) {
Josef Bacik5ea8d102017-04-06 17:01:58 -04001253 sock_shutdown(nbd);
Mike Christiee9e006f2019-08-04 14:10:06 -05001254 flush_workqueue(nbd->recv_workq);
1255 }
Josef Bacik9442b732017-02-07 17:10:22 -05001256 mutex_lock(&nbd->config_lock);
Josef Bacik76aa1d32018-05-16 14:51:22 -04001257 nbd_bdev_reset(bdev);
Josef Bacik9442b732017-02-07 17:10:22 -05001258 /* user requested, ignore socket errors */
Xiubo Liec76a7b2019-09-17 17:26:05 +05301259 if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
Josef Bacike46c7282017-04-06 17:02:00 -04001260 ret = 0;
Xiubo Liec76a7b2019-09-17 17:26:05 +05301261 if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
Josef Bacike46c7282017-04-06 17:02:00 -04001262 ret = -ETIMEDOUT;
1263 return ret;
Josef Bacik9442b732017-02-07 17:10:22 -05001264}
Markus Pargmann30d53d92015-08-17 08:20:06 +02001265
Josef Bacik29eaadc2017-04-06 17:01:59 -04001266static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
1267 struct block_device *bdev)
1268{
Josef Bacik2516ab12017-04-06 17:02:03 -04001269 sock_shutdown(nbd);
Munehisa Kamata2b5c8f02019-07-31 20:13:10 +08001270 __invalidate_device(bdev, true);
Josef Bacik29eaadc2017-04-06 17:01:59 -04001271 nbd_bdev_reset(bdev);
Xiubo Liec76a7b2019-09-17 17:26:05 +05301272 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
Josef Bacike46c7282017-04-06 17:02:00 -04001273 &nbd->config->runtime_flags))
1274 nbd_config_put(nbd);
Josef Bacik29eaadc2017-04-06 17:01:59 -04001275}
1276
Xiubo Li553768d2019-05-29 15:16:05 -05001277static bool nbd_is_valid_blksize(unsigned long blksize)
1278{
1279 if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
1280 blksize > PAGE_SIZE)
1281 return false;
1282 return true;
1283}
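
In other words, only power-of-two sizes between 512 bytes and PAGE_SIZE are accepted; on a typical 4 KiB-page kernel that means 512, 1024, 2048 or 4096 bytes.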
1284
Mike Christie55313e92019-08-13 11:39:49 -05001285static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
1286{
1287 nbd->tag_set.timeout = timeout * HZ;
Mike Christie2da22da2019-08-13 11:39:52 -05001288 if (timeout)
1289 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
Mike Christie55313e92019-08-13 11:39:49 -05001290}
1291
Josef Bacik9561a7a2016-11-22 14:04:40 -05001292/* Must be called with config_lock held */
Wanlong Gaof4507162012-03-28 14:42:51 -07001293static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
Pavel Machek1a2ad212009-04-02 16:58:41 -07001294 unsigned int cmd, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001296 struct nbd_config *config = nbd->config;
1297
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298 switch (cmd) {
Josef Bacik9442b732017-02-07 17:10:22 -05001299 case NBD_DISCONNECT:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001300 return nbd_disconnect(nbd);
Markus Pargmann23272a672015-10-29 11:51:16 +01001301 case NBD_CLEAR_SOCK:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001302 nbd_clear_sock_ioctl(nbd, bdev);
1303 return 0;
Josef Bacik9442b732017-02-07 17:10:22 -05001304 case NBD_SET_SOCK:
Josef Bacike46c7282017-04-06 17:02:00 -04001305 return nbd_add_socket(nbd, arg, false);
Josef Bacik9442b732017-02-07 17:10:22 -05001306 case NBD_SET_BLKSIZE:
Xiubo Li553768d2019-05-29 15:16:05 -05001307 if (!arg)
1308 arg = NBD_DEF_BLKSIZE;
1309 if (!nbd_is_valid_blksize(arg))
Jens Axboebc811f02018-09-04 11:52:34 -06001310 return -EINVAL;
Josef Bacik29eaadc2017-04-06 17:01:59 -04001311 nbd_size_set(nbd, arg,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001312 div_s64(config->bytesize, arg));
Josef Bacike5445412017-02-13 10:39:47 -05001313 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314 case NBD_SET_SIZE:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001315 nbd_size_set(nbd, config->blksize,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001316 div_s64(arg, config->blksize));
Josef Bacike5445412017-02-13 10:39:47 -05001317 return 0;
Markus Pargmann37091fd2015-07-27 07:36:49 +02001318 case NBD_SET_SIZE_BLOCKS:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001319 nbd_size_set(nbd, config->blksize, arg);
Josef Bacike5445412017-02-13 10:39:47 -05001320 return 0;
Paul Clements7fdfd402007-10-16 23:27:37 -07001321 case NBD_SET_TIMEOUT:
Mike Christie2da22da2019-08-13 11:39:52 -05001322 nbd_set_cmd_timeout(nbd, arg);
Paul Clements7fdfd402007-10-16 23:27:37 -07001323 return 0;
Pavel Machek1a2ad212009-04-02 16:58:41 -07001324
Paul Clements2f012502012-10-04 17:16:15 -07001325 case NBD_SET_FLAGS:
Josef Bacik5ea8d102017-04-06 17:01:58 -04001326 config->flags = arg;
Paul Clements2f012502012-10-04 17:16:15 -07001327 return 0;
Josef Bacik9442b732017-02-07 17:10:22 -05001328 case NBD_DO_IT:
Josef Bacike46c7282017-04-06 17:02:00 -04001329 return nbd_start_device_ioctl(nbd, bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330 case NBD_CLEAR_QUE:
Herbert Xu4b2f0262006-01-06 00:09:47 -08001331 /*
1332 * This is for compatibility only. The queue is always cleared
1333 * by NBD_DO_IT or NBD_CLEAR_SOCK.
1334 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 return 0;
1336 case NBD_PRINT_DEBUG:
Josef Bacikfd8383f2016-09-08 12:33:37 -07001337 /*
1338 * For compatibility only, we no longer keep a list of
1339 * outstanding requests.
1340 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 return 0;
1342 }
Pavel Machek1a2ad212009-04-02 16:58:41 -07001343 return -ENOTTY;
1344}
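
The switch above is normally driven from userspace in a fixed order (set geometry, hand over the socket, then block in NBD_DO_IT), which is essentially what nbd-client does. A hedged, minimal userspace sketch (the helper name, the 4096-byte block size and the lack of error cleanup are illustrative, not part of this driver):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/nbd.h>

/* Assumes "sock" is a TCP socket already connected to an NBD server that
 * has completed the protocol handshake, and that the caller holds
 * CAP_SYS_ADMIN.  NBD_DO_IT does not return until the device is
 * disconnected (or the receive side dies). */
static int attach_nbd(const char *dev, int sock, unsigned long long bytes)
{
	int fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, NBD_SET_BLKSIZE, 4096UL) < 0 ||
	    ioctl(fd, NBD_SET_SIZE_BLOCKS, bytes / 4096) < 0 ||
	    ioctl(fd, NBD_SET_SOCK, sock) < 0)
		return -1;
	return ioctl(fd, NBD_DO_IT);
}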
1345
1346static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
1347 unsigned int cmd, unsigned long arg)
1348{
Wanlong Gaof4507162012-03-28 14:42:51 -07001349 struct nbd_device *nbd = bdev->bd_disk->private_data;
Josef Bacike46c7282017-04-06 17:02:00 -04001350 struct nbd_config *config = nbd->config;
1351 int error = -EINVAL;
Pavel Machek1a2ad212009-04-02 16:58:41 -07001352
1353 if (!capable(CAP_SYS_ADMIN))
1354 return -EPERM;
1355
Josef Bacik1dae69b2017-05-05 22:25:18 -04001356 /* The block layer will pass back some non-nbd ioctls in case we have
1357 * special handling for them, but we don't so just return an error.
1358 */
1359 if (_IOC_TYPE(cmd) != 0xab)
1360 return -EINVAL;
1361
Josef Bacik9561a7a2016-11-22 14:04:40 -05001362 mutex_lock(&nbd->config_lock);
Josef Bacike46c7282017-04-06 17:02:00 -04001363
1364	 /* Don't allow ioctl operations on an nbd device that was created with
1365 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
1366 */
Xiubo Liec76a7b2019-09-17 17:26:05 +05301367 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
Josef Bacike46c7282017-04-06 17:02:00 -04001368 (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
1369 error = __nbd_ioctl(bdev, nbd, cmd, arg);
1370 else
1371 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
Josef Bacik9561a7a2016-11-22 14:04:40 -05001372 mutex_unlock(&nbd->config_lock);
Pavel Machek1a2ad212009-04-02 16:58:41 -07001373 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374}
1375
Josef Bacik5ea8d102017-04-06 17:01:58 -04001376static struct nbd_config *nbd_alloc_config(void)
1377{
1378 struct nbd_config *config;
1379
1380 config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
1381 if (!config)
1382 return NULL;
1383 atomic_set(&config->recv_threads, 0);
1384 init_waitqueue_head(&config->recv_wq);
Josef Bacik560bc4b2017-04-06 17:02:04 -04001385 init_waitqueue_head(&config->conn_wait);
Xiubo Li553768d2019-05-29 15:16:05 -05001386 config->blksize = NBD_DEF_BLKSIZE;
Josef Bacik560bc4b2017-04-06 17:02:04 -04001387 atomic_set(&config->live_connections, 0);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001388 try_module_get(THIS_MODULE);
1389 return config;
1390}
1391
1392static int nbd_open(struct block_device *bdev, fmode_t mode)
1393{
1394 struct nbd_device *nbd;
1395 int ret = 0;
1396
1397 mutex_lock(&nbd_index_mutex);
1398 nbd = bdev->bd_disk->private_data;
1399 if (!nbd) {
1400 ret = -ENXIO;
1401 goto out;
1402 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001403 if (!refcount_inc_not_zero(&nbd->refs)) {
1404 ret = -ENXIO;
1405 goto out;
1406 }
Josef Bacik5ea8d102017-04-06 17:01:58 -04001407 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1408 struct nbd_config *config;
1409
1410 mutex_lock(&nbd->config_lock);
1411 if (refcount_inc_not_zero(&nbd->config_refs)) {
1412 mutex_unlock(&nbd->config_lock);
1413 goto out;
1414 }
1415 config = nbd->config = nbd_alloc_config();
1416 if (!config) {
1417 ret = -ENOMEM;
1418 mutex_unlock(&nbd->config_lock);
1419 goto out;
1420 }
1421 refcount_set(&nbd->config_refs, 1);
Josef Bacikc6a47592017-04-06 17:02:06 -04001422 refcount_inc(&nbd->refs);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001423 mutex_unlock(&nbd->config_lock);
Josef Bacikfe1f9e62018-05-16 14:51:21 -04001424 bdev->bd_invalidated = 1;
1425 } else if (nbd_disconnected(nbd->config)) {
1426 bdev->bd_invalidated = 1;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001427 }
1428out:
1429 mutex_unlock(&nbd_index_mutex);
1430 return ret;
1431}
1432
1433static void nbd_release(struct gendisk *disk, fmode_t mode)
1434{
1435 struct nbd_device *nbd = disk->private_data;
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001436 struct block_device *bdev = bdget_disk(disk, 0);
1437
Xiubo Liec76a7b2019-09-17 17:26:05 +05301438 if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001439 bdev->bd_openers == 0)
1440 nbd_disconnect_and_put(nbd);
1441
Josef Bacik5ea8d102017-04-06 17:01:58 -04001442 nbd_config_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04001443 nbd_put(nbd);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001444}
1445
Alexey Dobriyan83d5cde2009-09-21 17:01:13 -07001446static const struct block_device_operations nbd_fops =
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447{
1448 .owner = THIS_MODULE,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001449 .open = nbd_open,
1450 .release = nbd_release,
Arnd Bergmann8a6cfeb2010-07-08 10:18:46 +02001451 .ioctl = nbd_ioctl,
Al Viro263a3df2016-01-07 10:04:37 -05001452 .compat_ioctl = nbd_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453};
1454
Markus Pargmann30d53d92015-08-17 08:20:06 +02001455#if IS_ENABLED(CONFIG_DEBUG_FS)
1456
1457static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
1458{
1459 struct nbd_device *nbd = s->private;
1460
1461 if (nbd->task_recv)
1462 seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
Markus Pargmann30d53d92015-08-17 08:20:06 +02001463
1464 return 0;
1465}
1466
1467static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
1468{
1469 return single_open(file, nbd_dbg_tasks_show, inode->i_private);
1470}
1471
1472static const struct file_operations nbd_dbg_tasks_ops = {
1473 .open = nbd_dbg_tasks_open,
1474 .read = seq_read,
1475 .llseek = seq_lseek,
1476 .release = single_release,
1477};
1478
1479static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1480{
1481 struct nbd_device *nbd = s->private;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001482 u32 flags = nbd->config->flags;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001483
1484 seq_printf(s, "Hex: 0x%08x\n\n", flags);
1485
1486 seq_puts(s, "Known flags:\n");
1487
1488 if (flags & NBD_FLAG_HAS_FLAGS)
1489 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
1490 if (flags & NBD_FLAG_READ_ONLY)
1491 seq_puts(s, "NBD_FLAG_READ_ONLY\n");
1492 if (flags & NBD_FLAG_SEND_FLUSH)
1493 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
Shaun McDowell685c9b22017-05-25 23:55:54 -04001494 if (flags & NBD_FLAG_SEND_FUA)
1495 seq_puts(s, "NBD_FLAG_SEND_FUA\n");
Markus Pargmann30d53d92015-08-17 08:20:06 +02001496 if (flags & NBD_FLAG_SEND_TRIM)
1497 seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
1498
1499 return 0;
1500}
1501
1502static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
1503{
1504 return single_open(file, nbd_dbg_flags_show, inode->i_private);
1505}
1506
1507static const struct file_operations nbd_dbg_flags_ops = {
1508 .open = nbd_dbg_flags_open,
1509 .read = seq_read,
1510 .llseek = seq_lseek,
1511 .release = single_release,
1512};
1513
1514static int nbd_dev_dbg_init(struct nbd_device *nbd)
1515{
1516 struct dentry *dir;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001517 struct nbd_config *config = nbd->config;
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001518
1519 if (!nbd_dbg_dir)
1520 return -EIO;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001521
1522 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001523 if (!dir) {
1524 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1525 nbd_name(nbd));
1526 return -EIO;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001527 }
Josef Bacik5ea8d102017-04-06 17:01:58 -04001528 config->dbg_dir = dir;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001529
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001530 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001531 debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
Josef Bacik0eadf372016-09-08 12:33:40 -07001532 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001533 debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
Josef Bacikd366a0f2016-06-08 10:32:10 -04001534 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
Markus Pargmann30d53d92015-08-17 08:20:06 +02001535
1536 return 0;
1537}
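
With debugfs mounted (typically at /sys/kernel/debug), the files created above appear under <debugfs>/nbd/nbdN/ as read-only (0444) attributes: tasks, flags, size_bytes, blocksize and timeout.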
1538
1539static void nbd_dev_dbg_close(struct nbd_device *nbd)
1540{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001541 debugfs_remove_recursive(nbd->config->dbg_dir);
Markus Pargmann30d53d92015-08-17 08:20:06 +02001542}
1543
1544static int nbd_dbg_init(void)
1545{
1546 struct dentry *dbg_dir;
1547
1548 dbg_dir = debugfs_create_dir("nbd", NULL);
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001549 if (!dbg_dir)
1550 return -EIO;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001551
1552 nbd_dbg_dir = dbg_dir;
1553
1554 return 0;
1555}
1556
1557static void nbd_dbg_close(void)
1558{
1559 debugfs_remove_recursive(nbd_dbg_dir);
1560}
1561
1562#else /* IS_ENABLED(CONFIG_DEBUG_FS) */
1563
1564static int nbd_dev_dbg_init(struct nbd_device *nbd)
1565{
1566 return 0;
1567}
1568
1569static void nbd_dev_dbg_close(struct nbd_device *nbd)
1570{
1571}
1572
1573static int nbd_dbg_init(void)
1574{
1575 return 0;
1576}
1577
1578static void nbd_dbg_close(void)
1579{
1580}
1581
1582#endif
1583
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001584static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1585 unsigned int hctx_idx, unsigned int numa_node)
Josef Bacikfd8383f2016-09-08 12:33:37 -07001586{
1587 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001588 cmd->nbd = set->driver_data;
Josef Bacikd7d94d42018-07-16 12:11:34 -04001589 cmd->flags = 0;
Josef Bacik8f3ea352018-07-16 12:11:35 -04001590 mutex_init(&cmd->lock);
Josef Bacikfd8383f2016-09-08 12:33:37 -07001591 return 0;
1592}
1593
Eric Biggersf363b082017-03-30 13:39:16 -07001594static const struct blk_mq_ops nbd_mq_ops = {
Josef Bacikfd8383f2016-09-08 12:33:37 -07001595 .queue_rq = nbd_queue_rq,
Christoph Hellwig1e388ae2017-04-20 16:03:06 +02001596 .complete = nbd_complete_rq,
Josef Bacikfd8383f2016-09-08 12:33:37 -07001597 .init_request = nbd_init_request,
Josef Bacik0eadf372016-09-08 12:33:40 -07001598 .timeout = nbd_xmit_timeout,
Josef Bacikfd8383f2016-09-08 12:33:37 -07001599};
1600
Josef Bacikb0d91112017-02-01 16:11:40 -05001601static int nbd_dev_add(int index)
1602{
1603 struct nbd_device *nbd;
1604 struct gendisk *disk;
1605 struct request_queue *q;
1606 int err = -ENOMEM;
1607
1608 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1609 if (!nbd)
1610 goto out;
1611
1612 disk = alloc_disk(1 << part_shift);
1613 if (!disk)
1614 goto out_free_nbd;
1615
1616 if (index >= 0) {
1617 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1618 GFP_KERNEL);
1619 if (err == -ENOSPC)
1620 err = -EEXIST;
1621 } else {
1622 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
1623 if (err >= 0)
1624 index = err;
1625 }
1626 if (err < 0)
1627 goto out_free_disk;
1628
Josef Bacike46c7282017-04-06 17:02:00 -04001629 nbd->index = index;
Josef Bacikb0d91112017-02-01 16:11:40 -05001630 nbd->disk = disk;
1631 nbd->tag_set.ops = &nbd_mq_ops;
1632 nbd->tag_set.nr_hw_queues = 1;
1633 nbd->tag_set.queue_depth = 128;
1634 nbd->tag_set.numa_node = NUMA_NO_NODE;
1635 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
1636 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
Ming Lei56d18f62019-02-15 19:13:24 +08001637 BLK_MQ_F_BLOCKING;
Josef Bacikb0d91112017-02-01 16:11:40 -05001638 nbd->tag_set.driver_data = nbd;
1639
1640 err = blk_mq_alloc_tag_set(&nbd->tag_set);
1641 if (err)
1642 goto out_free_idr;
1643
1644 q = blk_mq_init_queue(&nbd->tag_set);
1645 if (IS_ERR(q)) {
1646 err = PTR_ERR(q);
1647 goto out_free_tags;
1648 }
1649 disk->queue = q;
1650
1651 /*
1652 * Tell the block layer that we are not a rotational device
1653 */
Bart Van Assche8b904b52018-03-07 17:10:10 -08001654 blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
1655 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
Josef Bacik6df133a2018-05-23 13:35:59 -04001656 disk->queue->limits.discard_granularity = 0;
Josef Bacik07ce2132018-06-05 11:41:23 -04001657 disk->queue->limits.discard_alignment = 0;
Josef Bacik6df133a2018-05-23 13:35:59 -04001658 blk_queue_max_discard_sectors(disk->queue, 0);
Josef Bacikebb16d02017-04-18 16:22:51 -04001659 blk_queue_max_segment_size(disk->queue, UINT_MAX);
Josef Bacik1cc1f172017-04-20 15:47:01 -04001660 blk_queue_max_segments(disk->queue, USHRT_MAX);
Josef Bacikb0d91112017-02-01 16:11:40 -05001661 blk_queue_max_hw_sectors(disk->queue, 65536);
1662 disk->queue->limits.max_sectors = 256;
1663
Josef Bacikb0d91112017-02-01 16:11:40 -05001664 mutex_init(&nbd->config_lock);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001665 refcount_set(&nbd->config_refs, 0);
Josef Bacikc6a47592017-04-06 17:02:06 -04001666 refcount_set(&nbd->refs, 1);
1667 INIT_LIST_HEAD(&nbd->list);
Josef Bacikb0d91112017-02-01 16:11:40 -05001668 disk->major = NBD_MAJOR;
1669 disk->first_minor = index << part_shift;
1670 disk->fops = &nbd_fops;
1671 disk->private_data = nbd;
1672 sprintf(disk->disk_name, "nbd%d", index);
Josef Bacikb0d91112017-02-01 16:11:40 -05001673 add_disk(disk);
Josef Bacik47d902b2017-04-06 17:02:05 -04001674 nbd_total_devices++;
Josef Bacikb0d91112017-02-01 16:11:40 -05001675 return index;
1676
1677out_free_tags:
1678 blk_mq_free_tag_set(&nbd->tag_set);
1679out_free_idr:
1680 idr_remove(&nbd_index_idr, index);
1681out_free_disk:
1682 put_disk(disk);
1683out_free_nbd:
1684 kfree(nbd);
1685out:
1686 return err;
1687}
1688
Josef Bacike46c7282017-04-06 17:02:00 -04001689static int find_free_cb(int id, void *ptr, void *data)
1690{
1691 struct nbd_device *nbd = ptr;
1692 struct nbd_device **found = data;
1693
1694 if (!refcount_read(&nbd->config_refs)) {
1695 *found = nbd;
1696 return 1;
1697 }
1698 return 0;
1699}
1700
1701/* Netlink interface. */
Stephen Hemmingera86c4122018-07-18 09:32:43 -07001702static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
Josef Bacike46c7282017-04-06 17:02:00 -04001703 [NBD_ATTR_INDEX] = { .type = NLA_U32 },
1704 [NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
1705 [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
1706 [NBD_ATTR_TIMEOUT] = { .type = NLA_U64 },
1707 [NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 },
1708 [NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 },
1709 [NBD_ATTR_SOCKETS] = { .type = NLA_NESTED},
Josef Bacik560bc4b2017-04-06 17:02:04 -04001710 [NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 },
Josef Bacik47d902b2017-04-06 17:02:05 -04001711 [NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
Josef Bacike46c7282017-04-06 17:02:00 -04001712};
1713
Stephen Hemmingera86c4122018-07-18 09:32:43 -07001714static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
Josef Bacike46c7282017-04-06 17:02:00 -04001715 [NBD_SOCK_FD] = { .type = NLA_U32 },
1716};
1717
Josef Bacik47d902b2017-04-06 17:02:05 -04001718/* We don't use this right now since we don't parse the incoming list, but we
1719 * still want it here so userspace knows what to expect.
1720 */
Stephen Hemmingera86c4122018-07-18 09:32:43 -07001721static const struct nla_policy __attribute__((unused))
Josef Bacik47d902b2017-04-06 17:02:05 -04001722nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
1723 [NBD_DEVICE_INDEX] = { .type = NLA_U32 },
1724 [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
1725};
1726
Mike Christie4ddeaae82019-05-29 15:16:06 -05001727static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
1728{
1729 struct nbd_config *config = nbd->config;
1730 u64 bsize = config->blksize;
1731 u64 bytes = config->bytesize;
1732
1733 if (info->attrs[NBD_ATTR_SIZE_BYTES])
1734 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
1735
1736 if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
1737 bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
1738 if (!bsize)
1739 bsize = NBD_DEF_BLKSIZE;
1740 if (!nbd_is_valid_blksize(bsize)) {
1741 printk(KERN_ERR "Invalid block size %llu\n", bsize);
1742 return -EINVAL;
1743 }
1744 }
1745
1746 if (bytes != config->bytesize || bsize != config->blksize)
1747 nbd_size_set(nbd, bsize, div64_u64(bytes, bsize));
1748 return 0;
1749}
1750
Josef Bacike46c7282017-04-06 17:02:00 -04001751static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
1752{
1753 struct nbd_device *nbd = NULL;
1754 struct nbd_config *config;
1755 int index = -1;
1756 int ret;
Josef Bacika2c97902017-04-06 17:02:07 -04001757 bool put_dev = false;
Josef Bacike46c7282017-04-06 17:02:00 -04001758
1759 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1760 return -EPERM;
1761
1762 if (info->attrs[NBD_ATTR_INDEX])
1763 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1764 if (!info->attrs[NBD_ATTR_SOCKETS]) {
1765 printk(KERN_ERR "nbd: must specify at least one socket\n");
1766 return -EINVAL;
1767 }
1768 if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
1769 printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
1770 return -EINVAL;
1771 }
1772again:
1773 mutex_lock(&nbd_index_mutex);
1774 if (index == -1) {
1775 ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
1776 if (ret == 0) {
1777 int new_index;
1778 new_index = nbd_dev_add(-1);
1779 if (new_index < 0) {
1780 mutex_unlock(&nbd_index_mutex);
1781 printk(KERN_ERR "nbd: failed to add new device\n");
Gustavo A. R. Silva09799622018-02-12 11:14:55 -06001782 return new_index;
Josef Bacike46c7282017-04-06 17:02:00 -04001783 }
1784 nbd = idr_find(&nbd_index_idr, new_index);
1785 }
1786 } else {
1787 nbd = idr_find(&nbd_index_idr, index);
Josef Bacike6a76272017-08-14 18:25:33 +00001788 if (!nbd) {
1789 ret = nbd_dev_add(index);
1790 if (ret < 0) {
1791 mutex_unlock(&nbd_index_mutex);
1792 printk(KERN_ERR "nbd: failed to add new device\n");
1793 return ret;
1794 }
1795 nbd = idr_find(&nbd_index_idr, index);
1796 }
Josef Bacike46c7282017-04-06 17:02:00 -04001797 }
Josef Bacike46c7282017-04-06 17:02:00 -04001798 if (!nbd) {
1799 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1800 index);
Josef Bacikc6a47592017-04-06 17:02:06 -04001801 mutex_unlock(&nbd_index_mutex);
Josef Bacike46c7282017-04-06 17:02:00 -04001802 return -EINVAL;
1803 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001804 if (!refcount_inc_not_zero(&nbd->refs)) {
1805 mutex_unlock(&nbd_index_mutex);
1806 if (index == -1)
1807 goto again;
1808 printk(KERN_ERR "nbd: device at index %d is going down\n",
1809 index);
1810 return -EINVAL;
1811 }
1812 mutex_unlock(&nbd_index_mutex);
Josef Bacike46c7282017-04-06 17:02:00 -04001813
1814 mutex_lock(&nbd->config_lock);
1815 if (refcount_read(&nbd->config_refs)) {
1816 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001817 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001818 if (index == -1)
1819 goto again;
1820 printk(KERN_ERR "nbd: nbd%d already in use\n", index);
1821 return -EBUSY;
1822 }
1823 if (WARN_ON(nbd->config)) {
1824 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001825 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001826 return -EINVAL;
1827 }
1828 config = nbd->config = nbd_alloc_config();
1829 if (!nbd->config) {
1830 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001831 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001832 printk(KERN_ERR "nbd: couldn't allocate config\n");
1833 return -ENOMEM;
1834 }
1835 refcount_set(&nbd->config_refs, 1);
Xiubo Liec76a7b2019-09-17 17:26:05 +05301836 set_bit(NBD_RT_BOUND, &config->runtime_flags);
Josef Bacike46c7282017-04-06 17:02:00 -04001837
Mike Christie4ddeaae82019-05-29 15:16:06 -05001838 ret = nbd_genl_size_set(info, nbd);
1839 if (ret)
1840 goto out;
1841
Mike Christie55313e92019-08-13 11:39:49 -05001842 if (info->attrs[NBD_ATTR_TIMEOUT])
1843 nbd_set_cmd_timeout(nbd,
1844 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
Josef Bacik560bc4b2017-04-06 17:02:04 -04001845 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
1846 config->dead_conn_timeout =
1847 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
1848 config->dead_conn_timeout *= HZ;
1849 }
Josef Bacike46c7282017-04-06 17:02:00 -04001850 if (info->attrs[NBD_ATTR_SERVER_FLAGS])
1851 config->flags =
1852 nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
Josef Bacika2c97902017-04-06 17:02:07 -04001853 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
1854 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
1855 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
Xiubo Liec76a7b2019-09-17 17:26:05 +05301856 set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
Josef Bacika2c97902017-04-06 17:02:07 -04001857 &config->runtime_flags);
1858 put_dev = true;
1859 }
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001860 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
Xiubo Liec76a7b2019-09-17 17:26:05 +05301861 set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001862 &config->runtime_flags);
1863 }
Josef Bacika2c97902017-04-06 17:02:07 -04001864 }
1865
Josef Bacike46c7282017-04-06 17:02:00 -04001866 if (info->attrs[NBD_ATTR_SOCKETS]) {
1867 struct nlattr *attr;
1868 int rem, fd;
1869
1870 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
1871 rem) {
1872 struct nlattr *socks[NBD_SOCK_MAX+1];
1873
1874 if (nla_type(attr) != NBD_SOCK_ITEM) {
1875 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
1876 ret = -EINVAL;
1877 goto out;
1878 }
Johannes Berg8cb08172019-04-26 14:07:28 +02001879 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
1880 attr,
1881 nbd_sock_policy,
1882 info->extack);
Josef Bacike46c7282017-04-06 17:02:00 -04001883 if (ret != 0) {
1884 printk(KERN_ERR "nbd: error processing sock list\n");
1885 ret = -EINVAL;
1886 goto out;
1887 }
1888 if (!socks[NBD_SOCK_FD])
1889 continue;
1890 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
1891 ret = nbd_add_socket(nbd, fd, true);
1892 if (ret)
1893 goto out;
1894 }
1895 }
1896 ret = nbd_start_device(nbd);
1897out:
1898 mutex_unlock(&nbd->config_lock);
1899 if (!ret) {
Xiubo Liec76a7b2019-09-17 17:26:05 +05301900 set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
Josef Bacike46c7282017-04-06 17:02:00 -04001901 refcount_inc(&nbd->config_refs);
1902 nbd_connect_reply(info, nbd->index);
1903 }
1904 nbd_config_put(nbd);
Josef Bacika2c97902017-04-06 17:02:07 -04001905 if (put_dev)
1906 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001907 return ret;
1908}
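
For comparison with the ioctl path, here is a hedged sketch of how a userspace tool might issue NBD_CMD_CONNECT over generic netlink using libnl-genl-3; the helper name is made up, error handling is trimmed, and the attribute nesting simply mirrors the NBD_ATTR_SOCKETS / NBD_SOCK_ITEM / NBD_SOCK_FD layout parsed above:

#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nbd-netlink.h>

/* Assumes "sock" is a connected, handshaked TCP socket to the NBD server. */
static int nbd_nl_connect(int index, int sock, uint64_t bytes)
{
	struct nl_sock *nl = nl_socket_alloc();
	struct nlattr *socks, *item;
	struct nl_msg *msg;
	int family;

	genl_connect(nl);
	family = genl_ctrl_resolve(nl, NBD_GENL_FAMILY_NAME);

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NBD_CMD_CONNECT, NBD_GENL_VERSION);
	nla_put_u32(msg, NBD_ATTR_INDEX, index);
	nla_put_u64(msg, NBD_ATTR_SIZE_BYTES, bytes);
	socks = nla_nest_start(msg, NBD_ATTR_SOCKETS);
	item = nla_nest_start(msg, NBD_SOCK_ITEM);
	nla_put_u32(msg, NBD_SOCK_FD, sock);
	nla_nest_end(msg, item);
	nla_nest_end(msg, socks);

	return nl_send_sync(nl, msg);	/* sends, waits for the ACK, frees msg */
}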
1909
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001910static void nbd_disconnect_and_put(struct nbd_device *nbd)
1911{
1912 mutex_lock(&nbd->config_lock);
1913 nbd_disconnect(nbd);
1914 nbd_clear_sock(nbd);
1915 mutex_unlock(&nbd->config_lock);
Mike Christiee9e006f2019-08-04 14:10:06 -05001916 /*
1917 * Make sure recv thread has finished, so it does not drop the last
1918 * config ref and try to destroy the workqueue from inside the work
1919 * queue.
1920 */
1921 flush_workqueue(nbd->recv_workq);
Xiubo Liec76a7b2019-09-17 17:26:05 +05301922 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001923 &nbd->config->runtime_flags))
1924 nbd_config_put(nbd);
1925}
1926
Josef Bacike46c7282017-04-06 17:02:00 -04001927static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
1928{
1929 struct nbd_device *nbd;
1930 int index;
1931
1932 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1933 return -EPERM;
1934
1935 if (!info->attrs[NBD_ATTR_INDEX]) {
1936 printk(KERN_ERR "nbd: must specify an index to disconnect\n");
1937 return -EINVAL;
1938 }
1939 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1940 mutex_lock(&nbd_index_mutex);
1941 nbd = idr_find(&nbd_index_idr, index);
Josef Bacike46c7282017-04-06 17:02:00 -04001942 if (!nbd) {
Josef Bacikc6a47592017-04-06 17:02:06 -04001943 mutex_unlock(&nbd_index_mutex);
Josef Bacike46c7282017-04-06 17:02:00 -04001944 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1945 index);
1946 return -EINVAL;
1947 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001948 if (!refcount_inc_not_zero(&nbd->refs)) {
1949 mutex_unlock(&nbd_index_mutex);
1950 printk(KERN_ERR "nbd: device at index %d is going down\n",
1951 index);
1952 return -EINVAL;
1953 }
1954 mutex_unlock(&nbd_index_mutex);
1955 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1956 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001957 return 0;
Josef Bacikc6a47592017-04-06 17:02:06 -04001958 }
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001959 nbd_disconnect_and_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001960 nbd_config_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04001961 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001962 return 0;
1963}
1964
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001965static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
1966{
1967 struct nbd_device *nbd = NULL;
1968 struct nbd_config *config;
1969 int index;
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001970 int ret = 0;
Josef Bacika2c97902017-04-06 17:02:07 -04001971 bool put_dev = false;
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001972
1973 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1974 return -EPERM;
1975
1976 if (!info->attrs[NBD_ATTR_INDEX]) {
1977 printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
1978 return -EINVAL;
1979 }
1980 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1981 mutex_lock(&nbd_index_mutex);
1982 nbd = idr_find(&nbd_index_idr, index);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001983 if (!nbd) {
Josef Bacikc6a47592017-04-06 17:02:06 -04001984 mutex_unlock(&nbd_index_mutex);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001985 printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
1986 index);
1987 return -EINVAL;
1988 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001989 if (!refcount_inc_not_zero(&nbd->refs)) {
1990 mutex_unlock(&nbd_index_mutex);
1991 printk(KERN_ERR "nbd: device at index %d is going down\n",
1992 index);
1993 return -EINVAL;
1994 }
1995 mutex_unlock(&nbd_index_mutex);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001996
1997 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1998 dev_err(nbd_to_dev(nbd),
1999 "not configured, cannot reconfigure\n");
Josef Bacikc6a47592017-04-06 17:02:06 -04002000 nbd_put(nbd);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002001 return -EINVAL;
2002 }
2003
2004 mutex_lock(&nbd->config_lock);
2005 config = nbd->config;
Xiubo Liec76a7b2019-09-17 17:26:05 +05302006 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002007 !nbd->task_recv) {
2008 dev_err(nbd_to_dev(nbd),
2009 "not configured, cannot reconfigure\n");
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07002010 ret = -EINVAL;
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002011 goto out;
2012 }
2013
Mike Christie4ddeaae82019-05-29 15:16:06 -05002014 ret = nbd_genl_size_set(info, nbd);
2015 if (ret)
2016 goto out;
2017
Mike Christie55313e92019-08-13 11:39:49 -05002018 if (info->attrs[NBD_ATTR_TIMEOUT])
2019 nbd_set_cmd_timeout(nbd,
2020 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
Josef Bacik560bc4b2017-04-06 17:02:04 -04002021 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
2022 config->dead_conn_timeout =
2023 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
2024 config->dead_conn_timeout *= HZ;
2025 }
Josef Bacika2c97902017-04-06 17:02:07 -04002026 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
2027 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
2028 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
Xiubo Liec76a7b2019-09-17 17:26:05 +05302029 if (!test_and_set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
Josef Bacika2c97902017-04-06 17:02:07 -04002030 &config->runtime_flags))
2031 put_dev = true;
2032 } else {
Xiubo Liec76a7b2019-09-17 17:26:05 +05302033 if (test_and_clear_bit(NBD_RT_DESTROY_ON_DISCONNECT,
Josef Bacika2c97902017-04-06 17:02:07 -04002034 &config->runtime_flags))
2035 refcount_inc(&nbd->refs);
2036 }
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07002037
2038 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
Xiubo Liec76a7b2019-09-17 17:26:05 +05302039 set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07002040 &config->runtime_flags);
2041 } else {
Xiubo Liec76a7b2019-09-17 17:26:05 +05302042 clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07002043 &config->runtime_flags);
2044 }
Josef Bacika2c97902017-04-06 17:02:07 -04002045 }
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002046
2047 if (info->attrs[NBD_ATTR_SOCKETS]) {
2048 struct nlattr *attr;
2049 int rem, fd;
2050
2051 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2052 rem) {
2053 struct nlattr *socks[NBD_SOCK_MAX+1];
2054
2055 if (nla_type(attr) != NBD_SOCK_ITEM) {
2056 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
2057 ret = -EINVAL;
2058 goto out;
2059 }
Johannes Berg8cb08172019-04-26 14:07:28 +02002060 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2061 attr,
2062 nbd_sock_policy,
2063 info->extack);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002064 if (ret != 0) {
2065 printk(KERN_ERR "nbd: error processing sock list\n");
2066 ret = -EINVAL;
2067 goto out;
2068 }
2069 if (!socks[NBD_SOCK_FD])
2070 continue;
2071 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2072 ret = nbd_reconnect_socket(nbd, fd);
2073 if (ret) {
2074 if (ret == -ENOSPC)
2075 ret = 0;
2076 goto out;
2077 }
2078 dev_info(nbd_to_dev(nbd), "reconnected socket\n");
2079 }
2080 }
2081out:
2082 mutex_unlock(&nbd->config_lock);
2083 nbd_config_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04002084 nbd_put(nbd);
Josef Bacika2c97902017-04-06 17:02:07 -04002085 if (put_dev)
2086 nbd_put(nbd);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002087 return ret;
2088}
2089
Josef Bacike46c7282017-04-06 17:02:00 -04002090static const struct genl_ops nbd_connect_genl_ops[] = {
2091 {
2092 .cmd = NBD_CMD_CONNECT,
Johannes Bergef6243a2019-04-26 14:07:31 +02002093 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
Josef Bacike46c7282017-04-06 17:02:00 -04002094 .doit = nbd_genl_connect,
2095 },
2096 {
2097 .cmd = NBD_CMD_DISCONNECT,
Johannes Bergef6243a2019-04-26 14:07:31 +02002098 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
Josef Bacike46c7282017-04-06 17:02:00 -04002099 .doit = nbd_genl_disconnect,
2100 },
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002101 {
2102 .cmd = NBD_CMD_RECONFIGURE,
Johannes Bergef6243a2019-04-26 14:07:31 +02002103 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002104 .doit = nbd_genl_reconfigure,
2105 },
Josef Bacik47d902b2017-04-06 17:02:05 -04002106 {
2107 .cmd = NBD_CMD_STATUS,
Johannes Bergef6243a2019-04-26 14:07:31 +02002108 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
Josef Bacik47d902b2017-04-06 17:02:05 -04002109 .doit = nbd_genl_status,
2110 },
Josef Bacike46c7282017-04-06 17:02:00 -04002111};
2112
Josef Bacik799f9a32017-04-06 17:02:02 -04002113static const struct genl_multicast_group nbd_mcast_grps[] = {
2114 { .name = NBD_GENL_MCAST_GROUP_NAME, },
2115};
2116
Josef Bacike46c7282017-04-06 17:02:00 -04002117static struct genl_family nbd_genl_family __ro_after_init = {
2118 .hdrsize = 0,
2119 .name = NBD_GENL_FAMILY_NAME,
2120 .version = NBD_GENL_VERSION,
2121 .module = THIS_MODULE,
2122 .ops = nbd_connect_genl_ops,
2123 .n_ops = ARRAY_SIZE(nbd_connect_genl_ops),
2124 .maxattr = NBD_ATTR_MAX,
Johannes Berg3b0f31f2019-03-21 22:51:02 +01002125 .policy = nbd_attr_policy,
Josef Bacik799f9a32017-04-06 17:02:02 -04002126 .mcgrps = nbd_mcast_grps,
2127 .n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
Josef Bacike46c7282017-04-06 17:02:00 -04002128};
2129
Josef Bacik47d902b2017-04-06 17:02:05 -04002130static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
2131{
2132 struct nlattr *dev_opt;
2133 u8 connected = 0;
2134 int ret;
2135
2136 /* This is a little racey, but for status it's ok. The
2137 * reason we don't take a ref here is because we can't
2138 * take a ref in the index == -1 case as we would need
2139 * to put under the nbd_index_mutex, which could
2140 * deadlock if we are configured to remove ourselves
2141 * once we're disconnected.
2142 */
2143 if (refcount_read(&nbd->config_refs))
2144 connected = 1;
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002145 dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
Josef Bacik47d902b2017-04-06 17:02:05 -04002146 if (!dev_opt)
2147 return -EMSGSIZE;
2148 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
2149 if (ret)
2150 return -EMSGSIZE;
2151 ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
2152 connected);
2153 if (ret)
2154 return -EMSGSIZE;
2155 nla_nest_end(reply, dev_opt);
2156 return 0;
2157}
2158
2159static int status_cb(int id, void *ptr, void *data)
2160{
2161 struct nbd_device *nbd = ptr;
2162 return populate_nbd_status(nbd, (struct sk_buff *)data);
2163}
2164
2165static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
2166{
2167 struct nlattr *dev_list;
2168 struct sk_buff *reply;
2169 void *reply_head;
2170 size_t msg_size;
2171 int index = -1;
2172 int ret = -ENOMEM;
2173
2174 if (info->attrs[NBD_ATTR_INDEX])
2175 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2176
2177 mutex_lock(&nbd_index_mutex);
2178
2179 msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
2180 nla_attr_size(sizeof(u8)));
2181 msg_size *= (index == -1) ? nbd_total_devices : 1;
2182
2183 reply = genlmsg_new(msg_size, GFP_KERNEL);
2184 if (!reply)
2185 goto out;
2186 reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
2187 NBD_CMD_STATUS);
2188 if (!reply_head) {
2189 nlmsg_free(reply);
2190 goto out;
2191 }
2192
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002193 dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
Josef Bacik47d902b2017-04-06 17:02:05 -04002194 if (index == -1) {
2195 ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
2196 if (ret) {
2197 nlmsg_free(reply);
2198 goto out;
2199 }
2200 } else {
2201 struct nbd_device *nbd;
2202 nbd = idr_find(&nbd_index_idr, index);
2203 if (nbd) {
2204 ret = populate_nbd_status(nbd, reply);
2205 if (ret) {
2206 nlmsg_free(reply);
2207 goto out;
2208 }
2209 }
2210 }
2211 nla_nest_end(reply, dev_list);
2212 genlmsg_end(reply, reply_head);
Li RongQingcd46eb82019-02-19 13:14:07 +08002213 ret = genlmsg_reply(reply, info);
Josef Bacik47d902b2017-04-06 17:02:05 -04002214out:
2215 mutex_unlock(&nbd_index_mutex);
2216 return ret;
2217}
2218
Josef Bacike46c7282017-04-06 17:02:00 -04002219static void nbd_connect_reply(struct genl_info *info, int index)
2220{
2221 struct sk_buff *skb;
2222 void *msg_head;
2223 int ret;
2224
2225 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2226 if (!skb)
2227 return;
2228 msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
2229 NBD_CMD_CONNECT);
2230 if (!msg_head) {
2231 nlmsg_free(skb);
2232 return;
2233 }
2234 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2235 if (ret) {
2236 nlmsg_free(skb);
2237 return;
2238 }
2239 genlmsg_end(skb, msg_head);
2240 genlmsg_reply(skb, info);
2241}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242
Josef Bacik799f9a32017-04-06 17:02:02 -04002243static void nbd_mcast_index(int index)
2244{
2245 struct sk_buff *skb;
2246 void *msg_head;
2247 int ret;
2248
2249 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2250 if (!skb)
2251 return;
2252 msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
2253 NBD_CMD_LINK_DEAD);
2254 if (!msg_head) {
2255 nlmsg_free(skb);
2256 return;
2257 }
2258 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2259 if (ret) {
2260 nlmsg_free(skb);
2261 return;
2262 }
2263 genlmsg_end(skb, msg_head);
2264 genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
2265}
2266
2267static void nbd_dead_link_work(struct work_struct *work)
2268{
2269 struct link_dead_args *args = container_of(work, struct link_dead_args,
2270 work);
2271 nbd_mcast_index(args->index);
2272 kfree(args);
2273}
2274
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275static int __init nbd_init(void)
2276{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277 int i;
2278
Adrian Bunk5b7b18c2006-03-25 03:07:04 -08002279 BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002281 if (max_part < 0) {
WANG Cong7742ce42011-08-19 14:48:28 +02002282 printk(KERN_ERR "nbd: max_part must be >= 0\n");
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002283 return -EINVAL;
2284 }
2285
2286 part_shift = 0;
Namhyung Kim5988ce22011-05-28 14:44:46 +02002287 if (max_part > 0) {
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002288 part_shift = fls(max_part);
2289
Namhyung Kim5988ce22011-05-28 14:44:46 +02002290 /*
2291 * Adjust max_part according to part_shift as it is exported
2292 * to user space so that user can know the max number of
2293 * partition kernel should be able to manage.
2294 *
2295 * Note that -1 is required because partition 0 is reserved
2296 * for the whole disk.
2297 */
2298 max_part = (1UL << part_shift) - 1;
2299 }
2300
Namhyung Kim3b271082011-05-28 14:44:46 +02002301 if ((1UL << part_shift) > DISK_MAX_PARTS)
2302 return -EINVAL;
2303
2304 if (nbds_max > 1UL << (MINORBITS - part_shift))
2305 return -EINVAL;
2306
Mike Christiee9e006f2019-08-04 14:10:06 -05002307 if (register_blkdev(NBD_MAJOR, "nbd"))
Josef Bacikb0d91112017-02-01 16:11:40 -05002308 return -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309
Josef Bacike46c7282017-04-06 17:02:00 -04002310 if (genl_register_family(&nbd_genl_family)) {
2311 unregister_blkdev(NBD_MAJOR, "nbd");
Josef Bacike46c7282017-04-06 17:02:00 -04002312 return -EINVAL;
2313 }
Markus Pargmann30d53d92015-08-17 08:20:06 +02002314 nbd_dbg_init();
2315
Josef Bacikb0d91112017-02-01 16:11:40 -05002316 mutex_lock(&nbd_index_mutex);
2317 for (i = 0; i < nbds_max; i++)
2318 nbd_dev_add(i);
2319 mutex_unlock(&nbd_index_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320 return 0;
Josef Bacikb0d91112017-02-01 16:11:40 -05002321}
2322
2323static int nbd_exit_cb(int id, void *ptr, void *data)
2324{
Josef Bacikc6a47592017-04-06 17:02:06 -04002325 struct list_head *list = (struct list_head *)data;
Josef Bacikb0d91112017-02-01 16:11:40 -05002326 struct nbd_device *nbd = ptr;
Josef Bacikc6a47592017-04-06 17:02:06 -04002327
Josef Bacikc6a47592017-04-06 17:02:06 -04002328 list_add_tail(&nbd->list, list);
Josef Bacikb0d91112017-02-01 16:11:40 -05002329 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330}
2331
2332static void __exit nbd_cleanup(void)
2333{
Josef Bacikc6a47592017-04-06 17:02:06 -04002334 struct nbd_device *nbd;
2335 LIST_HEAD(del_list);
2336
Markus Pargmann30d53d92015-08-17 08:20:06 +02002337 nbd_dbg_close();
2338
Josef Bacikc6a47592017-04-06 17:02:06 -04002339 mutex_lock(&nbd_index_mutex);
2340 idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
2341 mutex_unlock(&nbd_index_mutex);
2342
Josef Bacik60ae36a2017-04-28 09:49:19 -04002343 while (!list_empty(&del_list)) {
2344 nbd = list_first_entry(&del_list, struct nbd_device, list);
2345 list_del_init(&nbd->list);
2346 if (refcount_read(&nbd->refs) != 1)
Josef Bacikc6a47592017-04-06 17:02:06 -04002347 printk(KERN_ERR "nbd: possibly leaking a device\n");
2348 nbd_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04002349 }
2350
Josef Bacikb0d91112017-02-01 16:11:40 -05002351 idr_destroy(&nbd_index_idr);
Josef Bacike46c7282017-04-06 17:02:00 -04002352 genl_unregister_family(&nbd_genl_family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353 unregister_blkdev(NBD_MAJOR, "nbd");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354}
2355
2356module_init(nbd_init);
2357module_exit(nbd_cleanup);
2358
2359MODULE_DESCRIPTION("Network Block Device");
2360MODULE_LICENSE("GPL");
2361
Lars Marowsky-Bree40be0c22005-05-01 08:59:07 -07002362module_param(nbds_max, int, 0444);
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002363MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
2364module_param(max_part, int, 0444);
Josef Bacik7a8362a2017-08-14 18:56:16 +00002365MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");