// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

#define NBD_RT_TIMEDOUT			0
#define NBD_RT_DISCONNECT_REQUESTED	1
#define NBD_RT_DISCONNECTED		2
#define NBD_RT_HAS_PID_FILE		3
#define NBD_RT_HAS_CONFIG_REF		4
#define NBD_RT_BOUND			5
#define NBD_RT_DESTROY_ON_DISCONNECT	6
#define NBD_RT_DISCONNECT_ON_CLOSE	7

#define NBD_DESTROY_ON_DISCONNECT	0
#define NBD_DISCONNECT_REQUESTED	1

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;
	struct workqueue_struct *recv_workq;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;

	struct completion *destroy_complete;
	unsigned long flags;
};

#define NBD_CMD_REQUEUED	1

struct nbd_cmd {
	struct nbd_device *nbd;
	struct mutex lock;
	int index;
	int cookie;
	int retries;
	blk_status_t status;
	unsigned long flags;
	u32 cmd_cookie;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

#define NBD_DEF_BLKSIZE 1024

static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
		blk_mq_requeue_request(req, true);
}

#define NBD_COOKIE_BITS 32

static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	u32 tag = blk_mq_unique_tag(req);
	u64 cookie = cmd->cmd_cookie;

	return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
	return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> NBD_COOKIE_BITS);
}

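/*
 * Worked example of the handle layout (illustrative values): the 64-bit
 * handle sent on the wire packs the per-command reuse cookie into the
 * high 32 bits and the blk-mq unique tag (hw queue number plus tag) into
 * the low 32 bits.  With cmd_cookie == 3 and a unique tag of 0x00010002
 * (hw queue 1, tag 2), nbd_cmd_handle() yields 0x0000000300010002;
 * nbd_handle_to_tag() recovers 0x00010002 and nbd_handle_to_cookie()
 * recovers 3.  The cookie lets a reply for a stale, reissued request be
 * detected and dropped.
 */
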
static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = 0444},
	.show = pid_show,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	struct request_queue *q;

	if (disk) {
		q = disk->queue;
		del_gendisk(disk);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&nbd->tag_set);
		disk->private_data = NULL;
		put_disk(disk);
	}

	/*
	 * Complete destroy_complete only here, just before the nbd is
	 * freed, to make sure the disk and its related kobject have been
	 * removed completely, so a duplicate of the same device cannot be
	 * created in the meantime.
	 */
	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
		complete(nbd->destroy_complete);

	kfree(nbd);
}

static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		nbd_dev_remove(nbd);
		mutex_unlock(&nbd_index_mutex);
	}
}

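/*
 * Two reference counts are in play here: nbd->refs pins the device itself
 * (the final nbd_put() removes it from the idr and frees it), while
 * nbd->config_refs pins the current configuration (the final
 * nbd_config_put() tears the sockets down).  A device can therefore
 * outlive its configuration and be configured again later.
 */
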
static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}

static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		if (atomic_dec_return(&nbd->config->live_connections) == 0) {
			if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
					       &nbd->config->runtime_flags)) {
				set_bit(NBD_RT_DISCONNECTED,
					&nbd->config->runtime_flags);
				dev_info(nbd_to_dev(nbd),
					"Disconnected due to user request.\n");
			}
		}
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}

static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}

static void nbd_size_update(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct block_device *bdev = bdget_disk(nbd->disk, 0);

	if (config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = config->blksize;
		nbd->disk->queue->limits.discard_alignment = config->blksize;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
	}
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, config->bytesize >> 9);
	if (bdev) {
		if (bdev->bd_disk) {
			bd_set_size(bdev, config->bytesize);
			set_blocksize(bdev, config->blksize);
		} else
			bdev->bd_invalidated = 1;
		bdput(bdev);
	}
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	if (nbd->task_recv != NULL)
		nbd_size_update(nbd);
}

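/*
 * Sizing example (illustrative numbers): nbd_size_set(nbd, 1024, 2048)
 * stores blksize = 1024 and bytesize = 1024 * 2048 = 2 MiB, and
 * nbd_size_update() then advertises a capacity of bytesize >> 9 = 4096
 * 512-byte sectors with 1024-byte logical/physical block sizes.
 */
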
static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}

/*
 * Forcibly shut down the sockets, causing any in-flight listeners to
 * error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

static u32 req_to_nbd_cmd_type(struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		return NBD_CMD_TRIM;
	case REQ_OP_FLUSH:
		return NBD_CMD_FLUSH;
	case REQ_OP_WRITE:
		return NBD_CMD_WRITE;
	case REQ_OP_READ:
		return NBD_CMD_READ;
	default:
		return U32_MAX;
	}
}

static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!mutex_trylock(&cmd->lock))
		return BLK_EH_RESET_TIMER;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		mutex_unlock(&cmd->lock);
		goto done;
	}
	config = nbd->config;

	if (config->num_connections > 1) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying (%d/%d alive)\n",
				    atomic_read(&config->live_connections),
				    config->num_connections);
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection.
		 */
		if (config->socks && config->num_connections > 1) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket; only
				 * mark it dead if it's still the same socket
				 * this command was sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			mutex_unlock(&cmd->lock);
			nbd_requeue_cmd(cmd);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
	}

	if (!nbd->tag_set.timeout) {
		/*
		 * Userspace sets timeout=0 to disable socket disconnection,
		 * so just warn and reset the timer.
		 */
		cmd->retries++;
		dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
			 req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
			 (unsigned long long)blk_rq_pos(req) << 9,
			 blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);

		mutex_unlock(&cmd->lock);
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}

	dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
	set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);
	sock_shutdown(nbd);
	nbd_config_put(nbd);
done:
	blk_mq_complete_request(req);
	return BLK_EH_DONE;
}

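/*
 * In short, a timed-out command takes one of three exits above: it is
 * requeued onto a surviving connection when more than one is configured,
 * the timer is quietly rearmed when userspace disabled timeouts
 * (timeout == 0), or the request is failed and all sockets are shut down.
 */
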
/*
 * Send or receive packet. @send is non-zero for a send, zero for a
 * receive.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}

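/*
 * Why the memalloc_noreclaim_save() dance above: these transfers can sit
 * on the memory-reclaim path (dirty-page writeback headed for an nbd
 * device), so allocating with reclaim enabled could recurse back into
 * block I/O and deadlock.  Running the socket I/O with PF_MEMALLOC set
 * and GFP_NOIO | __GFP_MEMALLOC socket allocations avoids that recursion.
 */
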
/*
 * Different settings for sk->sk_sndtimeo can result in different return
 * values if there is a signal pending when we enter sendmsg: we may see
 * -EINTR when a timeout is set and -ERESTARTSYS when none is, so treat
 * both as an interrupted transfer.
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u64 handle;
	u32 type;
	u32 nbd_cmd_flags = 0;
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));

	type = req_to_nbd_cmd_type(req);
	if (type == U32_MAX)
		return -EIO;

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);

			/* initialize handle for tracing purposes */
			handle = nbd_cmd_handle(cmd);

			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	} else {
		cmd->cmd_cookie++;
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	cmd->retries = 0;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	handle = nbd_cmd_handle(cmd);
	memcpy(request.handle, &handle, sizeof(handle));

	trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	trace_nbd_header_sent(req, handle);
	if (result <= 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	trace_nbd_payload_sent(req, handle);
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}

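/*
 * On-the-wire picture of what nbd_send_cmd() just produced (per the NBD
 * protocol; field layout is that of struct nbd_request in <linux/nbd.h>):
 * a fixed header carrying big-endian magic, type/flags, an opaque 8-byte
 * handle, a 64-bit byte offset and a 32-bit length, followed for writes
 * by the raw payload taken straight from the request's bios.
 */
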
/* An ERR_PTR return means something went wrong; inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u64 handle;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;
	int ret = 0;

	reply.magic = 0;
	iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&handle, reply.handle, sizeof(handle));
	tag = nbd_handle_to_tag(handle);
	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	trace_nbd_header_received(req, handle);
	cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
		ret = -ENOENT;
		goto out;
	}
	if (cmd->status != BLK_STS_OK) {
		dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		goto out;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected or we only have 1
				 * connection then we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config) ||
				    config->num_connections <= 1) {
					cmd->status = BLK_STS_IOERR;
					goto out;
				}
				ret = -EIO;
				goto out;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
out:
	trace_nbd_payload_received(req, handle);
	mutex_unlock(&cmd->lock);
	return ret ? ERR_PTR(ret) : cmd;
}

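/*
 * Reply matching in a nutshell: the server echoes the 8-byte handle back,
 * nbd_read_stat() unpacks it into the blk-mq unique tag to find the
 * request, then checks the reuse cookie, the command status and the
 * REQUEUED bit so that a late reply for a timed-out or reissued command
 * is rejected instead of completing the wrong I/O.
 */
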
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
	}
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	nbd_config_put(nbd);
	kfree(args);
}

static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);

	blk_mq_complete_request(req);
	return true;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}

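/*
 * Fallback example (illustrative): with sockets 0..2 where socket 1 has
 * died, find_fallback(nbd, 1) caches the first live alternative (socket
 * 0) in nsock->fallback_index and keeps returning it until that socket
 * dies too, so repeated requeues don't rescan the whole array.
 */
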
static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return 0;
	return wait_event_timeout(config->conn_wait,
				  atomic_read(&config->live_connections) > 0,
				  config->dead_conn_timeout) > 0;
}

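/*
 * dead_conn_timeout is in jiffies here (it is passed straight to
 * wait_event_timeout()); it is presumably configured from userspace via
 * the netlink interface, and the value 0 means "fail immediately rather
 * than wait for a reconnect".
 */
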
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		blk_mq_start_request(req);
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		blk_mq_start_request(req);
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			blk_mq_start_request(req);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted and _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		nbd_requeue_cmd(cmd);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, requeueing\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		nbd_requeue_cmd(cmd);
		ret = 0;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}

static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	mutex_lock(&cmd->lock);
	clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_STS_IOERR;
	else if (!ret)
		ret = BLK_STS_OK;
	mutex_unlock(&cmd->lock);

	return ret;
}

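/*
 * nbd_queue_rq() is the blk-mq .queue_rq callback.  When the device is
 * started, the number of hardware queues is matched to the number of
 * connections elsewhere in this driver, so hctx->queue_num effectively
 * picks the socket a request is first attempted on; the fallback logic
 * above only kicks in once that socket has died.
 */
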
static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
				     int *err)
{
	struct socket *sock;

	*err = 0;
	sock = sockfd_lookup(fd, err);
	if (!sock)
		return NULL;

	if (sock->ops->shutdown == sock_no_shutdown) {
		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
		*err = -EINVAL;
		sockfd_put(sock);
		return NULL;
	}

	return sock;
}

static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_RT_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		sockfd_put(sock);
		return -EBUSY;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	config->socks = socks;

	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);

	return 0;
}

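/*
 * Userspace hands sockets in by file descriptor: the legacy path is the
 * NBD_SET_SOCK ioctl (netlink == false, with a single setup task enforced
 * via task_setup), the newer one is the nbd generic-netlink interface
 * (netlink == true), which also drives nbd_reconnect_socket() below to
 * replace a dead connection without tearing the device down.
 */
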
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_mutex in an error path in the recv_work, so we
		 * need to queue_work outside of the tx_mutex.
		 */
		queue_work(nbd->recv_workq, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_size(bdev, 0);
}

Josef Bacik29eaadc2017-04-06 17:01:59 -04001125static void nbd_parse_flags(struct nbd_device *nbd)
Markus Pargmannd02cf532015-10-29 12:06:15 +01001126{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001127 struct nbd_config *config = nbd->config;
1128 if (config->flags & NBD_FLAG_READ_ONLY)
Josef Bacik29eaadc2017-04-06 17:01:59 -04001129 set_disk_ro(nbd->disk, true);
1130 else
1131 set_disk_ro(nbd->disk, false);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001132 if (config->flags & NBD_FLAG_SEND_TRIM)
Bart Van Assche8b904b52018-03-07 17:10:10 -08001133 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
Shaun McDowell685c9b22017-05-25 23:55:54 -04001134 if (config->flags & NBD_FLAG_SEND_FLUSH) {
1135 if (config->flags & NBD_FLAG_SEND_FUA)
1136 blk_queue_write_cache(nbd->disk->queue, true, true);
1137 else
1138 blk_queue_write_cache(nbd->disk->queue, true, false);
1139 }
Markus Pargmannd02cf532015-10-29 12:06:15 +01001140 else
Jens Axboeaafb1ee2016-03-30 10:10:53 -06001141 blk_queue_write_cache(nbd->disk->queue, false, false);
Markus Pargmannd02cf532015-10-29 12:06:15 +01001142}
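/*
 * The server-flag handling above, summarized:
 *
 *	NBD_FLAG_READ_ONLY		-> set_disk_ro(disk, true)
 *	NBD_FLAG_SEND_TRIM		-> QUEUE_FLAG_DISCARD set on the queue
 *	NBD_FLAG_SEND_FLUSH alone	-> blk_queue_write_cache(q, true, false)
 *	NBD_FLAG_SEND_FLUSH|SEND_FUA	-> blk_queue_write_cache(q, true, true)
 *	neither flush flag		-> blk_queue_write_cache(q, false, false)
 */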
1143
Josef Bacik9561a7a2016-11-22 14:04:40 -05001144static void send_disconnects(struct nbd_device *nbd)
1145{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001146 struct nbd_config *config = nbd->config;
Al Viroc9f2b6a2015-11-12 05:09:35 -05001147 struct nbd_request request = {
1148 .magic = htonl(NBD_REQUEST_MAGIC),
1149 .type = htonl(NBD_CMD_DISC),
1150 };
1151 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
1152 struct iov_iter from;
Josef Bacik9561a7a2016-11-22 14:04:40 -05001153 int i, ret;
1154
Josef Bacik5ea8d102017-04-06 17:01:58 -04001155 for (i = 0; i < config->num_connections; i++) {
Josef Bacikb4b2aec2017-07-21 10:48:14 -04001156 struct nbd_sock *nsock = config->socks[i];
1157
David Howellsaa563d72018-10-20 00:57:56 +01001158 iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
Josef Bacikb4b2aec2017-07-21 10:48:14 -04001159 mutex_lock(&nsock->tx_lock);
Josef Bacik9dd5d3a2017-03-24 14:08:26 -04001160 ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
Josef Bacik9561a7a2016-11-22 14:04:40 -05001161 if (ret <= 0)
1162 dev_err(disk_to_dev(nbd->disk),
1163 "Send disconnect failed %d\n", ret);
Josef Bacikb4b2aec2017-07-21 10:48:14 -04001164 mutex_unlock(&nsock->tx_lock);
Josef Bacik9561a7a2016-11-22 14:04:40 -05001165 }
1166}
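/*
 * A hedged userspace sketch (not driver code) of the same 28-byte
 * disconnect request send_disconnects() emits above; for NBD_CMD_DISC the
 * handle/from/len fields are ignored, so they are left zeroed. Assumes
 * sock_fd has already completed NBD negotiation.
 */
#include <arpa/inet.h>		/* htonl() */
#include <string.h>		/* memset() */
#include <unistd.h>		/* write() */
#include <linux/nbd.h>		/* struct nbd_request, NBD_REQUEST_MAGIC, ... */

static int send_disc_example(int sock_fd)
{
	struct nbd_request req;

	memset(&req, 0, sizeof(req));		/* handle/from/len unused */
	req.magic = htonl(NBD_REQUEST_MAGIC);
	req.type  = htonl(NBD_CMD_DISC);

	/* One shot, best effort -- mirrors the "ret <= 0" check above. */
	return write(sock_fd, &req, sizeof(req)) == sizeof(req) ? 0 : -1;
}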
1167
Josef Bacik29eaadc2017-04-06 17:01:59 -04001168static int nbd_disconnect(struct nbd_device *nbd)
Josef Bacik9442b732017-02-07 17:10:22 -05001169{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001170 struct nbd_config *config = nbd->config;
Josef Bacik9442b732017-02-07 17:10:22 -05001171
Josef Bacik5ea8d102017-04-06 17:01:58 -04001172 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
Xiubo Liec76a7b2019-09-17 17:26:05 +05301173 set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
Xiubo Li8454d682019-09-17 17:26:06 +05301174 set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
Josef Bacik2e134562017-07-21 10:48:13 -04001175 send_disconnects(nbd);
Josef Bacik9442b732017-02-07 17:10:22 -05001176 return 0;
1177}
1178
Josef Bacik29eaadc2017-04-06 17:01:59 -04001179static void nbd_clear_sock(struct nbd_device *nbd)
Josef Bacik9442b732017-02-07 17:10:22 -05001180{
1181 sock_shutdown(nbd);
1182 nbd_clear_que(nbd);
Josef Bacik9442b732017-02-07 17:10:22 -05001183 nbd->task_setup = NULL;
Josef Bacik9442b732017-02-07 17:10:22 -05001184}
1185
Josef Bacik5ea8d102017-04-06 17:01:58 -04001186static void nbd_config_put(struct nbd_device *nbd)
1187{
1188 if (refcount_dec_and_mutex_lock(&nbd->config_refs,
1189 &nbd->config_lock)) {
Josef Bacik5ea8d102017-04-06 17:01:58 -04001190 struct nbd_config *config = nbd->config;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001191 nbd_dev_dbg_close(nbd);
Josef Bacik29eaadc2017-04-06 17:01:59 -04001192 nbd_size_clear(nbd);
Xiubo Liec76a7b2019-09-17 17:26:05 +05301193 if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001194 &config->runtime_flags))
1195 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
1196 nbd->task_recv = NULL;
Josef Bacik29eaadc2017-04-06 17:01:59 -04001197 nbd_clear_sock(nbd);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001198 if (config->num_connections) {
1199 int i;
1200 for (i = 0; i < config->num_connections; i++) {
1201 sockfd_put(config->socks[i]->sock);
1202 kfree(config->socks[i]);
1203 }
1204 kfree(config->socks);
1205 }
Ilya Dryomovfa976532017-05-23 17:49:55 +02001206 kfree(nbd->config);
Ilya Dryomovaf622b82017-05-23 17:49:54 +02001207 nbd->config = NULL;
1208
Mike Christiee9e006f2019-08-04 14:10:06 -05001209 if (nbd->recv_workq)
1210 destroy_workqueue(nbd->recv_workq);
1211 nbd->recv_workq = NULL;
1212
Ilya Dryomovaf622b82017-05-23 17:49:54 +02001213 nbd->tag_set.timeout = 0;
Josef Bacik6df133a2018-05-23 13:35:59 -04001214 nbd->disk->queue->limits.discard_granularity = 0;
Josef Bacik07ce2132018-06-05 11:41:23 -04001215 nbd->disk->queue->limits.discard_alignment = 0;
Josef Bacik6df133a2018-05-23 13:35:59 -04001216 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
Bart Van Assche8b904b52018-03-07 17:10:10 -08001217 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
Josef Bacika2c97902017-04-06 17:02:07 -04001218
Josef Bacik5ea8d102017-04-06 17:01:58 -04001219 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001220 nbd_put(nbd);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001221 module_put(THIS_MODULE);
1222 }
1223}
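/*
 * nbd_config_put() is the refcount_dec_and_mutex_lock() teardown idiom:
 * only the thread that drops the last reference takes config_lock and
 * frees everything, while "get" paths use refcount_inc_not_zero() under
 * the same mutex. A minimal generic sketch of the pattern (hypothetical
 * "obj" type and teardown() helper, not nbd code):
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (refcount_dec_and_mutex_lock(&o->refs, &o->lock)) {
 *			teardown(o);		// last ref: free under lock
 *			mutex_unlock(&o->lock);
 *			kfree(o);
 *		}
 *	}
 */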
1224
Josef Bacike46c7282017-04-06 17:02:00 -04001225static int nbd_start_device(struct nbd_device *nbd)
Josef Bacik9442b732017-02-07 17:10:22 -05001226{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001227 struct nbd_config *config = nbd->config;
1228 int num_connections = config->num_connections;
Josef Bacik9442b732017-02-07 17:10:22 -05001229 int error = 0, i;
1230
1231 if (nbd->task_recv)
1232 return -EBUSY;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001233 if (!config->socks)
Josef Bacik9442b732017-02-07 17:10:22 -05001234 return -EINVAL;
1235 if (num_connections > 1 &&
Josef Bacik5ea8d102017-04-06 17:01:58 -04001236 !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
Josef Bacik9442b732017-02-07 17:10:22 -05001237 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
Josef Bacik5ea8d102017-04-06 17:01:58 -04001238 return -EINVAL;
Josef Bacik9442b732017-02-07 17:10:22 -05001239 }
1240
Mike Christiee9e006f2019-08-04 14:10:06 -05001241 nbd->recv_workq = alloc_workqueue("knbd%d-recv",
1242 WQ_MEM_RECLAIM | WQ_HIGHPRI |
1243 WQ_UNBOUND, 0, nbd->index);
1244 if (!nbd->recv_workq) {
1245 dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
1246 return -ENOMEM;
1247 }
1248
Josef Bacik5ea8d102017-04-06 17:01:58 -04001249 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
Josef Bacik9442b732017-02-07 17:10:22 -05001250 nbd->task_recv = current;
Josef Bacik9442b732017-02-07 17:10:22 -05001251
Josef Bacik29eaadc2017-04-06 17:01:59 -04001252 nbd_parse_flags(nbd);
Josef Bacik9442b732017-02-07 17:10:22 -05001253
1254 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
1255 if (error) {
1256 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
Josef Bacik5ea8d102017-04-06 17:01:58 -04001257 return error;
Josef Bacik9442b732017-02-07 17:10:22 -05001258 }
Xiubo Liec76a7b2019-09-17 17:26:05 +05301259 set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
Josef Bacik9442b732017-02-07 17:10:22 -05001260
1261 nbd_dev_dbg_init(nbd);
1262 for (i = 0; i < num_connections; i++) {
Josef Bacik5ea8d102017-04-06 17:01:58 -04001263 struct recv_thread_args *args;
1264
1265 args = kzalloc(sizeof(*args), GFP_KERNEL);
1266 if (!args) {
1267 sock_shutdown(nbd);
1268 return -ENOMEM;
1269 }
1270 sk_set_memalloc(config->socks[i]->sock->sk);
Josef Bacika7ee8cf2017-07-21 10:48:15 -04001271 if (nbd->tag_set.timeout)
1272 config->socks[i]->sock->sk->sk_sndtimeo =
1273 nbd->tag_set.timeout;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001274 atomic_inc(&config->recv_threads);
1275 refcount_inc(&nbd->config_refs);
1276 INIT_WORK(&args->work, recv_work);
1277 args->nbd = nbd;
1278 args->index = i;
Mike Christiee9e006f2019-08-04 14:10:06 -05001279 queue_work(nbd->recv_workq, &args->work);
Josef Bacik9442b732017-02-07 17:10:22 -05001280 }
Josef Bacik639812a2017-10-09 13:12:10 -04001281 nbd_size_update(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001282 return error;
1283}
1284
1285static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
1286{
1287 struct nbd_config *config = nbd->config;
1288 int ret;
1289
1290 ret = nbd_start_device(nbd);
1291 if (ret)
1292 return ret;
1293
Josef Bacike46c7282017-04-06 17:02:00 -04001294 if (max_part)
1295 bdev->bd_invalidated = 1;
1296 mutex_unlock(&nbd->config_lock);
1297 ret = wait_event_interruptible(config->recv_wq,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001298 atomic_read(&config->recv_threads) == 0);
Mike Christie1c058392019-12-08 16:51:50 -06001299 if (ret)
Josef Bacik5ea8d102017-04-06 17:01:58 -04001300 sock_shutdown(nbd);
Mike Christie1c058392019-12-08 16:51:50 -06001301 flush_workqueue(nbd->recv_workq);
1302
Josef Bacik9442b732017-02-07 17:10:22 -05001303 mutex_lock(&nbd->config_lock);
Josef Bacik76aa1d32018-05-16 14:51:22 -04001304 nbd_bdev_reset(bdev);
Josef Bacik9442b732017-02-07 17:10:22 -05001305 /* user requested, ignore socket errors */
Xiubo Liec76a7b2019-09-17 17:26:05 +05301306 if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
Josef Bacike46c7282017-04-06 17:02:00 -04001307 ret = 0;
Xiubo Liec76a7b2019-09-17 17:26:05 +05301308 if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
Josef Bacike46c7282017-04-06 17:02:00 -04001309 ret = -ETIMEDOUT;
1310 return ret;
Josef Bacik9442b732017-02-07 17:10:22 -05001311}
Markus Pargmann30d53d92015-08-17 08:20:06 +02001312
Josef Bacik29eaadc2017-04-06 17:01:59 -04001313static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
1314 struct block_device *bdev)
1315{
Josef Bacik2516ab12017-04-06 17:02:03 -04001316 sock_shutdown(nbd);
Munehisa Kamata2b5c8f02019-07-31 20:13:10 +08001317 __invalidate_device(bdev, true);
Josef Bacik29eaadc2017-04-06 17:01:59 -04001318 nbd_bdev_reset(bdev);
Xiubo Liec76a7b2019-09-17 17:26:05 +05301319 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
Josef Bacike46c7282017-04-06 17:02:00 -04001320 &nbd->config->runtime_flags))
1321 nbd_config_put(nbd);
Josef Bacik29eaadc2017-04-06 17:01:59 -04001322}
1323
Xiubo Li553768d2019-05-29 15:16:05 -05001324static bool nbd_is_valid_blksize(unsigned long blksize)
1325{
1326 if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
1327 blksize > PAGE_SIZE)
1328 return false;
1329 return true;
1330}
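/*
 * Accepted sizes are powers of two in [512, PAGE_SIZE]; with 4 KiB pages
 * that is exactly 512, 1024, 2048 and 4096. For example:
 *
 *	nbd_is_valid_blksize(512)  -> true
 *	nbd_is_valid_blksize(768)  -> false (not a power of two)
 *	nbd_is_valid_blksize(8192) -> false (> PAGE_SIZE on 4 KiB pages)
 */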
1331
Mike Christie55313e92019-08-13 11:39:49 -05001332static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
1333{
1334 nbd->tag_set.timeout = timeout * HZ;
Mike Christie2da22da2019-08-13 11:39:52 -05001335 if (timeout)
1336 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
Mike Christie55313e92019-08-13 11:39:49 -05001337}
1338
Josef Bacik9561a7a2016-11-22 14:04:40 -05001339/* Must be called with config_lock held */
Wanlong Gaof4507162012-03-28 14:42:51 -07001340static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
Pavel Machek1a2ad212009-04-02 16:58:41 -07001341 unsigned int cmd, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001343 struct nbd_config *config = nbd->config;
1344
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345 switch (cmd) {
Josef Bacik9442b732017-02-07 17:10:22 -05001346 case NBD_DISCONNECT:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001347 return nbd_disconnect(nbd);
Markus Pargmann23272a672015-10-29 11:51:16 +01001348 case NBD_CLEAR_SOCK:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001349 nbd_clear_sock_ioctl(nbd, bdev);
1350 return 0;
Josef Bacik9442b732017-02-07 17:10:22 -05001351 case NBD_SET_SOCK:
Josef Bacike46c7282017-04-06 17:02:00 -04001352 return nbd_add_socket(nbd, arg, false);
Josef Bacik9442b732017-02-07 17:10:22 -05001353 case NBD_SET_BLKSIZE:
Xiubo Li553768d2019-05-29 15:16:05 -05001354 if (!arg)
1355 arg = NBD_DEF_BLKSIZE;
1356 if (!nbd_is_valid_blksize(arg))
Jens Axboebc811f02018-09-04 11:52:34 -06001357 return -EINVAL;
Josef Bacik29eaadc2017-04-06 17:01:59 -04001358 nbd_size_set(nbd, arg,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001359 div_s64(config->bytesize, arg));
Josef Bacike5445412017-02-13 10:39:47 -05001360 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 case NBD_SET_SIZE:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001362 nbd_size_set(nbd, config->blksize,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001363 div_s64(arg, config->blksize));
Josef Bacike5445412017-02-13 10:39:47 -05001364 return 0;
Markus Pargmann37091fd2015-07-27 07:36:49 +02001365 case NBD_SET_SIZE_BLOCKS:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001366 nbd_size_set(nbd, config->blksize, arg);
Josef Bacike5445412017-02-13 10:39:47 -05001367 return 0;
Paul Clements7fdfd402007-10-16 23:27:37 -07001368 case NBD_SET_TIMEOUT:
Mike Christie2da22da2019-08-13 11:39:52 -05001369 nbd_set_cmd_timeout(nbd, arg);
Paul Clements7fdfd402007-10-16 23:27:37 -07001370 return 0;
Pavel Machek1a2ad212009-04-02 16:58:41 -07001371
Paul Clements2f012502012-10-04 17:16:15 -07001372 case NBD_SET_FLAGS:
Josef Bacik5ea8d102017-04-06 17:01:58 -04001373 config->flags = arg;
Paul Clements2f012502012-10-04 17:16:15 -07001374 return 0;
Josef Bacik9442b732017-02-07 17:10:22 -05001375 case NBD_DO_IT:
Josef Bacike46c7282017-04-06 17:02:00 -04001376 return nbd_start_device_ioctl(nbd, bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377 case NBD_CLEAR_QUE:
Herbert Xu4b2f0262006-01-06 00:09:47 -08001378 /*
1379 * This is for compatibility only. The queue is always cleared
1380 * by NBD_DO_IT or NBD_CLEAR_SOCK.
1381 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 return 0;
1383 case NBD_PRINT_DEBUG:
Josef Bacikfd8383f2016-09-08 12:33:37 -07001384 /*
1385 * For compatibility only, we no longer keep a list of
1386 * outstanding requests.
1387 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 return 0;
1389 }
Pavel Machek1a2ad212009-04-02 16:58:41 -07001390 return -ENOTTY;
1391}
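/*
 * A hedged sketch of the legacy userspace sequence that drives the ioctls
 * above (roughly what nbd-client does); error paths trimmed for brevity.
 * Assumes dev_fd is an open /dev/nbdX and sock is a connected socket that
 * already finished NBD negotiation.
 */
#include <sys/ioctl.h>
#include <linux/nbd.h>

static int legacy_setup_example(int dev_fd, int sock, unsigned long bytes)
{
	if (ioctl(dev_fd, NBD_SET_BLKSIZE, 4096UL) < 0)
		return -1;
	if (ioctl(dev_fd, NBD_SET_SIZE, bytes) < 0)
		return -1;
	if (ioctl(dev_fd, NBD_SET_TIMEOUT, 30UL) < 0)	/* seconds */
		return -1;
	if (ioctl(dev_fd, NBD_SET_SOCK, (unsigned long)sock) < 0)
		return -1;
	/* Blocks until disconnect/timeout -- see nbd_start_device_ioctl(). */
	return ioctl(dev_fd, NBD_DO_IT);
}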
1392
1393static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
1394 unsigned int cmd, unsigned long arg)
1395{
Wanlong Gaof4507162012-03-28 14:42:51 -07001396 struct nbd_device *nbd = bdev->bd_disk->private_data;
Josef Bacike46c7282017-04-06 17:02:00 -04001397 struct nbd_config *config = nbd->config;
1398 int error = -EINVAL;
Pavel Machek1a2ad212009-04-02 16:58:41 -07001399
1400 if (!capable(CAP_SYS_ADMIN))
1401 return -EPERM;
1402
Josef Bacik1dae69b2017-05-05 22:25:18 -04001403 /* The block layer will pass back some non-nbd ioctls in case we have
1404 * special handling for them, but we don't, so just return an error.
1405 */
1406 if (_IOC_TYPE(cmd) != 0xab)
1407 return -EINVAL;
1408
Josef Bacik9561a7a2016-11-22 14:04:40 -05001409 mutex_lock(&nbd->config_lock);
Josef Bacike46c7282017-04-06 17:02:00 -04001410
1411 /* Don't allow ioctl operations on an nbd device that was created with
1412 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
1413 */
Xiubo Liec76a7b2019-09-17 17:26:05 +05301414 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
Josef Bacike46c7282017-04-06 17:02:00 -04001415 (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
1416 error = __nbd_ioctl(bdev, nbd, cmd, arg);
1417 else
1418 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
Josef Bacik9561a7a2016-11-22 14:04:40 -05001419 mutex_unlock(&nbd->config_lock);
Pavel Machek1a2ad212009-04-02 16:58:41 -07001420 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421}
1422
Josef Bacik5ea8d102017-04-06 17:01:58 -04001423static struct nbd_config *nbd_alloc_config(void)
1424{
1425 struct nbd_config *config;
1426
1427 config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
1428 if (!config)
1429 return NULL;
1430 atomic_set(&config->recv_threads, 0);
1431 init_waitqueue_head(&config->recv_wq);
Josef Bacik560bc4b2017-04-06 17:02:04 -04001432 init_waitqueue_head(&config->conn_wait);
Xiubo Li553768d2019-05-29 15:16:05 -05001433 config->blksize = NBD_DEF_BLKSIZE;
Josef Bacik560bc4b2017-04-06 17:02:04 -04001434 atomic_set(&config->live_connections, 0);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001435 try_module_get(THIS_MODULE);
1436 return config;
1437}
1438
1439static int nbd_open(struct block_device *bdev, fmode_t mode)
1440{
1441 struct nbd_device *nbd;
1442 int ret = 0;
1443
1444 mutex_lock(&nbd_index_mutex);
1445 nbd = bdev->bd_disk->private_data;
1446 if (!nbd) {
1447 ret = -ENXIO;
1448 goto out;
1449 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001450 if (!refcount_inc_not_zero(&nbd->refs)) {
1451 ret = -ENXIO;
1452 goto out;
1453 }
Josef Bacik5ea8d102017-04-06 17:01:58 -04001454 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1455 struct nbd_config *config;
1456
1457 mutex_lock(&nbd->config_lock);
1458 if (refcount_inc_not_zero(&nbd->config_refs)) {
1459 mutex_unlock(&nbd->config_lock);
1460 goto out;
1461 }
1462 config = nbd->config = nbd_alloc_config();
1463 if (!config) {
1464 ret = -ENOMEM;
1465 mutex_unlock(&nbd->config_lock);
1466 goto out;
1467 }
1468 refcount_set(&nbd->config_refs, 1);
Josef Bacikc6a47592017-04-06 17:02:06 -04001469 refcount_inc(&nbd->refs);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001470 mutex_unlock(&nbd->config_lock);
Josef Bacikfe1f9e62018-05-16 14:51:21 -04001471 bdev->bd_invalidated = 1;
1472 } else if (nbd_disconnected(nbd->config)) {
1473 bdev->bd_invalidated = 1;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001474 }
1475out:
1476 mutex_unlock(&nbd_index_mutex);
1477 return ret;
1478}
1479
1480static void nbd_release(struct gendisk *disk, fmode_t mode)
1481{
1482 struct nbd_device *nbd = disk->private_data;
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001483 struct block_device *bdev = bdget_disk(disk, 0);
1484
Xiubo Liec76a7b2019-09-17 17:26:05 +05301485 if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001486 bdev->bd_openers == 0)
1487 nbd_disconnect_and_put(nbd);
1488
Josef Bacik5ea8d102017-04-06 17:01:58 -04001489 nbd_config_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04001490 nbd_put(nbd);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001491}
1492
Alexey Dobriyan83d5cde2009-09-21 17:01:13 -07001493static const struct block_device_operations nbd_fops =
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494{
1495 .owner = THIS_MODULE,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001496 .open = nbd_open,
1497 .release = nbd_release,
Arnd Bergmann8a6cfeb2010-07-08 10:18:46 +02001498 .ioctl = nbd_ioctl,
Al Viro263a3df2016-01-07 10:04:37 -05001499 .compat_ioctl = nbd_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500};
1501
Markus Pargmann30d53d92015-08-17 08:20:06 +02001502#if IS_ENABLED(CONFIG_DEBUG_FS)
1503
1504static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
1505{
1506 struct nbd_device *nbd = s->private;
1507
1508 if (nbd->task_recv)
1509 seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
Markus Pargmann30d53d92015-08-17 08:20:06 +02001510
1511 return 0;
1512}
1513
1514static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
1515{
1516 return single_open(file, nbd_dbg_tasks_show, inode->i_private);
1517}
1518
1519static const struct file_operations nbd_dbg_tasks_ops = {
1520 .open = nbd_dbg_tasks_open,
1521 .read = seq_read,
1522 .llseek = seq_lseek,
1523 .release = single_release,
1524};
1525
1526static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1527{
1528 struct nbd_device *nbd = s->private;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001529 u32 flags = nbd->config->flags;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001530
1531 seq_printf(s, "Hex: 0x%08x\n\n", flags);
1532
1533 seq_puts(s, "Known flags:\n");
1534
1535 if (flags & NBD_FLAG_HAS_FLAGS)
1536 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
1537 if (flags & NBD_FLAG_READ_ONLY)
1538 seq_puts(s, "NBD_FLAG_READ_ONLY\n");
1539 if (flags & NBD_FLAG_SEND_FLUSH)
1540 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
Shaun McDowell685c9b22017-05-25 23:55:54 -04001541 if (flags & NBD_FLAG_SEND_FUA)
1542 seq_puts(s, "NBD_FLAG_SEND_FUA\n");
Markus Pargmann30d53d92015-08-17 08:20:06 +02001543 if (flags & NBD_FLAG_SEND_TRIM)
1544 seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
1545
1546 return 0;
1547}
1548
1549static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
1550{
1551 return single_open(file, nbd_dbg_flags_show, inode->i_private);
1552}
1553
1554static const struct file_operations nbd_dbg_flags_ops = {
1555 .open = nbd_dbg_flags_open,
1556 .read = seq_read,
1557 .llseek = seq_lseek,
1558 .release = single_release,
1559};
1560
1561static int nbd_dev_dbg_init(struct nbd_device *nbd)
1562{
1563 struct dentry *dir;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001564 struct nbd_config *config = nbd->config;
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001565
1566 if (!nbd_dbg_dir)
1567 return -EIO;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001568
1569 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001570 if (!dir) {
1571 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1572 nbd_name(nbd));
1573 return -EIO;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001574 }
Josef Bacik5ea8d102017-04-06 17:01:58 -04001575 config->dbg_dir = dir;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001576
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001577 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001578 debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
Josef Bacik0eadf372016-09-08 12:33:40 -07001579 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001580 debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
Josef Bacikd366a0f2016-06-08 10:32:10 -04001581 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
Markus Pargmann30d53d92015-08-17 08:20:06 +02001582
1583 return 0;
1584}
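/*
 * With debugfs mounted in the usual place, this creates, e.g. for nbd0:
 *
 *	/sys/kernel/debug/nbd/nbd0/tasks
 *	/sys/kernel/debug/nbd/nbd0/size_bytes
 *	/sys/kernel/debug/nbd/nbd0/timeout
 *	/sys/kernel/debug/nbd/nbd0/blocksize
 *	/sys/kernel/debug/nbd/nbd0/flags
 */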
1585
1586static void nbd_dev_dbg_close(struct nbd_device *nbd)
1587{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001588 debugfs_remove_recursive(nbd->config->dbg_dir);
Markus Pargmann30d53d92015-08-17 08:20:06 +02001589}
1590
1591static int nbd_dbg_init(void)
1592{
1593 struct dentry *dbg_dir;
1594
1595 dbg_dir = debugfs_create_dir("nbd", NULL);
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001596 if (!dbg_dir)
1597 return -EIO;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001598
1599 nbd_dbg_dir = dbg_dir;
1600
1601 return 0;
1602}
1603
1604static void nbd_dbg_close(void)
1605{
1606 debugfs_remove_recursive(nbd_dbg_dir);
1607}
1608
1609#else /* IS_ENABLED(CONFIG_DEBUG_FS) */
1610
1611static int nbd_dev_dbg_init(struct nbd_device *nbd)
1612{
1613 return 0;
1614}
1615
1616static void nbd_dev_dbg_close(struct nbd_device *nbd)
1617{
1618}
1619
1620static int nbd_dbg_init(void)
1621{
1622 return 0;
1623}
1624
1625static void nbd_dbg_close(void)
1626{
1627}
1628
1629#endif
1630
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001631static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1632 unsigned int hctx_idx, unsigned int numa_node)
Josef Bacikfd8383f2016-09-08 12:33:37 -07001633{
1634 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001635 cmd->nbd = set->driver_data;
Josef Bacikd7d94d42018-07-16 12:11:34 -04001636 cmd->flags = 0;
Josef Bacik8f3ea352018-07-16 12:11:35 -04001637 mutex_init(&cmd->lock);
Josef Bacikfd8383f2016-09-08 12:33:37 -07001638 return 0;
1639}
1640
Eric Biggersf363b082017-03-30 13:39:16 -07001641static const struct blk_mq_ops nbd_mq_ops = {
Josef Bacikfd8383f2016-09-08 12:33:37 -07001642 .queue_rq = nbd_queue_rq,
Christoph Hellwig1e388ae2017-04-20 16:03:06 +02001643 .complete = nbd_complete_rq,
Josef Bacikfd8383f2016-09-08 12:33:37 -07001644 .init_request = nbd_init_request,
Josef Bacik0eadf372016-09-08 12:33:40 -07001645 .timeout = nbd_xmit_timeout,
Josef Bacikfd8383f2016-09-08 12:33:37 -07001646};
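/*
 * Setting .cmd_size = sizeof(struct nbd_cmd) (see nbd_dev_add() below)
 * makes blk-mq allocate each request with the driver pdu directly behind
 * it, so the conversions used throughout the driver are plain pointer
 * arithmetic:
 *
 *	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);	// (void *)(rq + 1)
 *	struct request *rq  = blk_mq_rq_from_pdu(cmd);	// step back again
 */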
1647
Josef Bacikb0d91112017-02-01 16:11:40 -05001648static int nbd_dev_add(int index)
1649{
1650 struct nbd_device *nbd;
1651 struct gendisk *disk;
1652 struct request_queue *q;
1653 int err = -ENOMEM;
1654
1655 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1656 if (!nbd)
1657 goto out;
1658
1659 disk = alloc_disk(1 << part_shift);
1660 if (!disk)
1661 goto out_free_nbd;
1662
1663 if (index >= 0) {
1664 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1665 GFP_KERNEL);
1666 if (err == -ENOSPC)
1667 err = -EEXIST;
1668 } else {
1669 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
1670 if (err >= 0)
1671 index = err;
1672 }
1673 if (err < 0)
1674 goto out_free_disk;
1675
Josef Bacike46c7282017-04-06 17:02:00 -04001676 nbd->index = index;
Josef Bacikb0d91112017-02-01 16:11:40 -05001677 nbd->disk = disk;
1678 nbd->tag_set.ops = &nbd_mq_ops;
1679 nbd->tag_set.nr_hw_queues = 1;
1680 nbd->tag_set.queue_depth = 128;
1681 nbd->tag_set.numa_node = NUMA_NO_NODE;
1682 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
1683 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
Ming Lei56d18f62019-02-15 19:13:24 +08001684 BLK_MQ_F_BLOCKING;
Josef Bacikb0d91112017-02-01 16:11:40 -05001685 nbd->tag_set.driver_data = nbd;
Xiubo Li8454d682019-09-17 17:26:06 +05301686 nbd->destroy_complete = NULL;
Josef Bacikb0d91112017-02-01 16:11:40 -05001687
1688 err = blk_mq_alloc_tag_set(&nbd->tag_set);
1689 if (err)
1690 goto out_free_idr;
1691
1692 q = blk_mq_init_queue(&nbd->tag_set);
1693 if (IS_ERR(q)) {
1694 err = PTR_ERR(q);
1695 goto out_free_tags;
1696 }
1697 disk->queue = q;
1698
1699 /*
1700 * Tell the block layer that we are not a rotational device
1701 */
Bart Van Assche8b904b52018-03-07 17:10:10 -08001702 blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
1703 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
Josef Bacik6df133a2018-05-23 13:35:59 -04001704 disk->queue->limits.discard_granularity = 0;
Josef Bacik07ce2132018-06-05 11:41:23 -04001705 disk->queue->limits.discard_alignment = 0;
Josef Bacik6df133a2018-05-23 13:35:59 -04001706 blk_queue_max_discard_sectors(disk->queue, 0);
Josef Bacikebb16d02017-04-18 16:22:51 -04001707 blk_queue_max_segment_size(disk->queue, UINT_MAX);
Josef Bacik1cc1f172017-04-20 15:47:01 -04001708 blk_queue_max_segments(disk->queue, USHRT_MAX);
Josef Bacikb0d91112017-02-01 16:11:40 -05001709 blk_queue_max_hw_sectors(disk->queue, 65536);
1710 disk->queue->limits.max_sectors = 256;
1711
Josef Bacikb0d91112017-02-01 16:11:40 -05001712 mutex_init(&nbd->config_lock);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001713 refcount_set(&nbd->config_refs, 0);
Josef Bacikc6a47592017-04-06 17:02:06 -04001714 refcount_set(&nbd->refs, 1);
1715 INIT_LIST_HEAD(&nbd->list);
Josef Bacikb0d91112017-02-01 16:11:40 -05001716 disk->major = NBD_MAJOR;
1717 disk->first_minor = index << part_shift;
1718 disk->fops = &nbd_fops;
1719 disk->private_data = nbd;
1720 sprintf(disk->disk_name, "nbd%d", index);
Josef Bacikb0d91112017-02-01 16:11:40 -05001721 add_disk(disk);
Josef Bacik47d902b2017-04-06 17:02:05 -04001722 nbd_total_devices++;
Josef Bacikb0d91112017-02-01 16:11:40 -05001723 return index;
1724
1725out_free_tags:
1726 blk_mq_free_tag_set(&nbd->tag_set);
1727out_free_idr:
1728 idr_remove(&nbd_index_idr, index);
1729out_free_disk:
1730 put_disk(disk);
1731out_free_nbd:
1732 kfree(nbd);
1733out:
1734 return err;
1735}
1736
Josef Bacike46c7282017-04-06 17:02:00 -04001737static int find_free_cb(int id, void *ptr, void *data)
1738{
1739 struct nbd_device *nbd = ptr;
1740 struct nbd_device **found = data;
1741
1742 if (!refcount_read(&nbd->config_refs)) {
1743 *found = nbd;
1744 return 1;
1745 }
1746 return 0;
1747}
1748
1749/* Netlink interface. */
Stephen Hemmingera86c4122018-07-18 09:32:43 -07001750static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
Josef Bacike46c7282017-04-06 17:02:00 -04001751 [NBD_ATTR_INDEX] = { .type = NLA_U32 },
1752 [NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
1753 [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
1754 [NBD_ATTR_TIMEOUT] = { .type = NLA_U64 },
1755 [NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 },
1756 [NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 },
1757 [NBD_ATTR_SOCKETS] = { .type = NLA_NESTED},
Josef Bacik560bc4b2017-04-06 17:02:04 -04001758 [NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 },
Josef Bacik47d902b2017-04-06 17:02:05 -04001759 [NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
Josef Bacike46c7282017-04-06 17:02:00 -04001760};
1761
Stephen Hemmingera86c4122018-07-18 09:32:43 -07001762static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
Josef Bacike46c7282017-04-06 17:02:00 -04001763 [NBD_SOCK_FD] = { .type = NLA_U32 },
1764};
1765
Josef Bacik47d902b2017-04-06 17:02:05 -04001766/* We don't use this right now since we don't parse the incoming list, but we
1767 * still want it here so userspace knows what to expect.
1768 */
Stephen Hemmingera86c4122018-07-18 09:32:43 -07001769static const struct nla_policy __attribute__((unused))
Josef Bacik47d902b2017-04-06 17:02:05 -04001770nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
1771 [NBD_DEVICE_INDEX] = { .type = NLA_U32 },
1772 [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
1773};
1774
Mike Christie4ddeaae82019-05-29 15:16:06 -05001775static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
1776{
1777 struct nbd_config *config = nbd->config;
1778 u64 bsize = config->blksize;
1779 u64 bytes = config->bytesize;
1780
1781 if (info->attrs[NBD_ATTR_SIZE_BYTES])
1782 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
1783
1784 if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
1785 bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
1786 if (!bsize)
1787 bsize = NBD_DEF_BLKSIZE;
1788 if (!nbd_is_valid_blksize(bsize)) {
1789 printk(KERN_ERR "Invalid block size %llu\n", bsize);
1790 return -EINVAL;
1791 }
1792 }
1793
1794 if (bytes != config->bytesize || bsize != config->blksize)
1795 nbd_size_set(nbd, bsize, div64_u64(bytes, bsize));
1796 return 0;
1797}
1798
Josef Bacike46c7282017-04-06 17:02:00 -04001799static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
1800{
Xiubo Li8454d682019-09-17 17:26:06 +05301801 DECLARE_COMPLETION_ONSTACK(destroy_complete);
Josef Bacike46c7282017-04-06 17:02:00 -04001802 struct nbd_device *nbd = NULL;
1803 struct nbd_config *config;
1804 int index = -1;
1805 int ret;
Josef Bacika2c97902017-04-06 17:02:07 -04001806 bool put_dev = false;
Josef Bacike46c7282017-04-06 17:02:00 -04001807
1808 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1809 return -EPERM;
1810
1811 if (info->attrs[NBD_ATTR_INDEX])
1812 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1813 if (!info->attrs[NBD_ATTR_SOCKETS]) {
1814 printk(KERN_ERR "nbd: must specify at least one socket\n");
1815 return -EINVAL;
1816 }
1817 if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
1818 printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
1819 return -EINVAL;
1820 }
1821again:
1822 mutex_lock(&nbd_index_mutex);
1823 if (index == -1) {
1824 ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
1825 if (ret == 0) {
1826 int new_index;
1827 new_index = nbd_dev_add(-1);
1828 if (new_index < 0) {
1829 mutex_unlock(&nbd_index_mutex);
1830 printk(KERN_ERR "nbd: failed to add new device\n");
Gustavo A. R. Silva09799622018-02-12 11:14:55 -06001831 return new_index;
Josef Bacike46c7282017-04-06 17:02:00 -04001832 }
1833 nbd = idr_find(&nbd_index_idr, new_index);
1834 }
1835 } else {
1836 nbd = idr_find(&nbd_index_idr, index);
Josef Bacike6a76272017-08-14 18:25:33 +00001837 if (!nbd) {
1838 ret = nbd_dev_add(index);
1839 if (ret < 0) {
1840 mutex_unlock(&nbd_index_mutex);
1841 printk(KERN_ERR "nbd: failed to add new device\n");
1842 return ret;
1843 }
1844 nbd = idr_find(&nbd_index_idr, index);
1845 }
Josef Bacike46c7282017-04-06 17:02:00 -04001846 }
Josef Bacike46c7282017-04-06 17:02:00 -04001847 if (!nbd) {
1848 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1849 index);
Josef Bacikc6a47592017-04-06 17:02:06 -04001850 mutex_unlock(&nbd_index_mutex);
Josef Bacike46c7282017-04-06 17:02:00 -04001851 return -EINVAL;
1852 }
Xiubo Li8454d682019-09-17 17:26:06 +05301853
1854 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
1855 test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) {
1856 nbd->destroy_complete = &destroy_complete;
1857 mutex_unlock(&nbd_index_mutex);
1858
1859 /* Wait until the nbd device is fully destroyed */
1860 wait_for_completion(&destroy_complete);
1861 goto again;
1862 }
1863
Josef Bacikc6a47592017-04-06 17:02:06 -04001864 if (!refcount_inc_not_zero(&nbd->refs)) {
1865 mutex_unlock(&nbd_index_mutex);
1866 if (index == -1)
1867 goto again;
1868 printk(KERN_ERR "nbd: device at index %d is going down\n",
1869 index);
1870 return -EINVAL;
1871 }
1872 mutex_unlock(&nbd_index_mutex);
Josef Bacike46c7282017-04-06 17:02:00 -04001873
1874 mutex_lock(&nbd->config_lock);
1875 if (refcount_read(&nbd->config_refs)) {
1876 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001877 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001878 if (index == -1)
1879 goto again;
1880 printk(KERN_ERR "nbd: nbd%d already in use\n", index);
1881 return -EBUSY;
1882 }
1883 if (WARN_ON(nbd->config)) {
1884 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001885 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001886 return -EINVAL;
1887 }
1888 config = nbd->config = nbd_alloc_config();
1889 if (!nbd->config) {
1890 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001891 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001892 printk(KERN_ERR "nbd: couldn't allocate config\n");
1893 return -ENOMEM;
1894 }
1895 refcount_set(&nbd->config_refs, 1);
Xiubo Liec76a7b2019-09-17 17:26:05 +05301896 set_bit(NBD_RT_BOUND, &config->runtime_flags);
Josef Bacike46c7282017-04-06 17:02:00 -04001897
Mike Christie4ddeaae82019-05-29 15:16:06 -05001898 ret = nbd_genl_size_set(info, nbd);
1899 if (ret)
1900 goto out;
1901
Mike Christie55313e92019-08-13 11:39:49 -05001902 if (info->attrs[NBD_ATTR_TIMEOUT])
1903 nbd_set_cmd_timeout(nbd,
1904 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
Josef Bacik560bc4b2017-04-06 17:02:04 -04001905 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
1906 config->dead_conn_timeout =
1907 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
1908 config->dead_conn_timeout *= HZ;
1909 }
Josef Bacike46c7282017-04-06 17:02:00 -04001910 if (info->attrs[NBD_ATTR_SERVER_FLAGS])
1911 config->flags =
1912 nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
Josef Bacika2c97902017-04-06 17:02:07 -04001913 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
1914 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
1915 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
Xiubo Liec76a7b2019-09-17 17:26:05 +05301916 set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
Josef Bacika2c97902017-04-06 17:02:07 -04001917 &config->runtime_flags);
Xiubo Li8454d682019-09-17 17:26:06 +05301918 set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
Josef Bacika2c97902017-04-06 17:02:07 -04001919 put_dev = true;
Xiubo Li8454d682019-09-17 17:26:06 +05301920 } else {
1921 clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
Josef Bacika2c97902017-04-06 17:02:07 -04001922 }
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001923 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
Xiubo Liec76a7b2019-09-17 17:26:05 +05301924 set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001925 &config->runtime_flags);
1926 }
Josef Bacika2c97902017-04-06 17:02:07 -04001927 }
1928
Josef Bacike46c7282017-04-06 17:02:00 -04001929 if (info->attrs[NBD_ATTR_SOCKETS]) {
1930 struct nlattr *attr;
1931 int rem, fd;
1932
1933 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
1934 rem) {
1935 struct nlattr *socks[NBD_SOCK_MAX+1];
1936
1937 if (nla_type(attr) != NBD_SOCK_ITEM) {
1938 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
1939 ret = -EINVAL;
1940 goto out;
1941 }
Johannes Berg8cb08172019-04-26 14:07:28 +02001942 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
1943 attr,
1944 nbd_sock_policy,
1945 info->extack);
Josef Bacike46c7282017-04-06 17:02:00 -04001946 if (ret != 0) {
1947 printk(KERN_ERR "nbd: error processing sock list\n");
1948 ret = -EINVAL;
1949 goto out;
1950 }
1951 if (!socks[NBD_SOCK_FD])
1952 continue;
1953 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
1954 ret = nbd_add_socket(nbd, fd, true);
1955 if (ret)
1956 goto out;
1957 }
1958 }
1959 ret = nbd_start_device(nbd);
1960out:
1961 mutex_unlock(&nbd->config_lock);
1962 if (!ret) {
Xiubo Liec76a7b2019-09-17 17:26:05 +05301963 set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
Josef Bacike46c7282017-04-06 17:02:00 -04001964 refcount_inc(&nbd->config_refs);
1965 nbd_connect_reply(info, nbd->index);
1966 }
1967 nbd_config_put(nbd);
Josef Bacika2c97902017-04-06 17:02:07 -04001968 if (put_dev)
1969 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001970 return ret;
1971}
1972
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001973static void nbd_disconnect_and_put(struct nbd_device *nbd)
1974{
1975 mutex_lock(&nbd->config_lock);
1976 nbd_disconnect(nbd);
1977 nbd_clear_sock(nbd);
1978 mutex_unlock(&nbd->config_lock);
Mike Christiee9e006f2019-08-04 14:10:06 -05001979 /*
1980 * Make sure the recv thread has finished, so it does not drop the last
1981 * config ref and try to destroy the workqueue from inside the work
1982 * queue.
1983 */
1984 flush_workqueue(nbd->recv_workq);
Xiubo Liec76a7b2019-09-17 17:26:05 +05301985 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001986 &nbd->config->runtime_flags))
1987 nbd_config_put(nbd);
1988}
1989
Josef Bacike46c7282017-04-06 17:02:00 -04001990static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
1991{
1992 struct nbd_device *nbd;
1993 int index;
1994
1995 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1996 return -EPERM;
1997
1998 if (!info->attrs[NBD_ATTR_INDEX]) {
1999 printk(KERN_ERR "nbd: must specify an index to disconnect\n");
2000 return -EINVAL;
2001 }
2002 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2003 mutex_lock(&nbd_index_mutex);
2004 nbd = idr_find(&nbd_index_idr, index);
Josef Bacike46c7282017-04-06 17:02:00 -04002005 if (!nbd) {
Josef Bacikc6a47592017-04-06 17:02:06 -04002006 mutex_unlock(&nbd_index_mutex);
Josef Bacike46c7282017-04-06 17:02:00 -04002007 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
2008 index);
2009 return -EINVAL;
2010 }
Josef Bacikc6a47592017-04-06 17:02:06 -04002011 if (!refcount_inc_not_zero(&nbd->refs)) {
2012 mutex_unlock(&nbd_index_mutex);
2013 printk(KERN_ERR "nbd: device at index %d is going down\n",
2014 index);
2015 return -EINVAL;
2016 }
2017 mutex_unlock(&nbd_index_mutex);
2018 if (!refcount_inc_not_zero(&nbd->config_refs)) {
2019 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04002020 return 0;
Josef Bacikc6a47592017-04-06 17:02:06 -04002021 }
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07002022 nbd_disconnect_and_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04002023 nbd_config_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04002024 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04002025 return 0;
2026}
2027
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002028static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
2029{
2030 struct nbd_device *nbd = NULL;
2031 struct nbd_config *config;
2032 int index;
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07002033 int ret = 0;
Josef Bacika2c97902017-04-06 17:02:07 -04002034 bool put_dev = false;
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002035
2036 if (!netlink_capable(skb, CAP_SYS_ADMIN))
2037 return -EPERM;
2038
2039 if (!info->attrs[NBD_ATTR_INDEX]) {
2040 printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
2041 return -EINVAL;
2042 }
2043 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2044 mutex_lock(&nbd_index_mutex);
2045 nbd = idr_find(&nbd_index_idr, index);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002046 if (!nbd) {
Josef Bacikc6a47592017-04-06 17:02:06 -04002047 mutex_unlock(&nbd_index_mutex);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002048 printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
2049 index);
2050 return -EINVAL;
2051 }
Josef Bacikc6a47592017-04-06 17:02:06 -04002052 if (!refcount_inc_not_zero(&nbd->refs)) {
2053 mutex_unlock(&nbd_index_mutex);
2054 printk(KERN_ERR "nbd: device at index %d is going down\n",
2055 index);
2056 return -EINVAL;
2057 }
2058 mutex_unlock(&nbd_index_mutex);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002059
2060 if (!refcount_inc_not_zero(&nbd->config_refs)) {
2061 dev_err(nbd_to_dev(nbd),
2062 "not configured, cannot reconfigure\n");
Josef Bacikc6a47592017-04-06 17:02:06 -04002063 nbd_put(nbd);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002064 return -EINVAL;
2065 }
2066
2067 mutex_lock(&nbd->config_lock);
2068 config = nbd->config;
Xiubo Liec76a7b2019-09-17 17:26:05 +05302069 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002070 !nbd->task_recv) {
2071 dev_err(nbd_to_dev(nbd),
2072 "not configured, cannot reconfigure\n");
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07002073 ret = -EINVAL;
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002074 goto out;
2075 }
2076
Mike Christie4ddeaae82019-05-29 15:16:06 -05002077 ret = nbd_genl_size_set(info, nbd);
2078 if (ret)
2079 goto out;
2080
Mike Christie55313e92019-08-13 11:39:49 -05002081 if (info->attrs[NBD_ATTR_TIMEOUT])
2082 nbd_set_cmd_timeout(nbd,
2083 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
Josef Bacik560bc4b2017-04-06 17:02:04 -04002084 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
2085 config->dead_conn_timeout =
2086 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
2087 config->dead_conn_timeout *= HZ;
2088 }
Josef Bacika2c97902017-04-06 17:02:07 -04002089 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
2090 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
2091 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
Xiubo Liec76a7b2019-09-17 17:26:05 +05302092 if (!test_and_set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
Josef Bacika2c97902017-04-06 17:02:07 -04002093 &config->runtime_flags))
2094 put_dev = true;
Xiubo Li8454d682019-09-17 17:26:06 +05302095 set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
Josef Bacika2c97902017-04-06 17:02:07 -04002096 } else {
Xiubo Liec76a7b2019-09-17 17:26:05 +05302097 if (test_and_clear_bit(NBD_RT_DESTROY_ON_DISCONNECT,
Josef Bacika2c97902017-04-06 17:02:07 -04002098 &config->runtime_flags))
2099 refcount_inc(&nbd->refs);
Xiubo Li8454d682019-09-17 17:26:06 +05302100 clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
Josef Bacika2c97902017-04-06 17:02:07 -04002101 }
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07002102
2103 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
Xiubo Liec76a7b2019-09-17 17:26:05 +05302104 set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07002105 &config->runtime_flags);
2106 } else {
Xiubo Liec76a7b2019-09-17 17:26:05 +05302107 clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07002108 &config->runtime_flags);
2109 }
Josef Bacika2c97902017-04-06 17:02:07 -04002110 }
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002111
2112 if (info->attrs[NBD_ATTR_SOCKETS]) {
2113 struct nlattr *attr;
2114 int rem, fd;
2115
2116 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2117 rem) {
2118 struct nlattr *socks[NBD_SOCK_MAX+1];
2119
2120 if (nla_type(attr) != NBD_SOCK_ITEM) {
2121 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
2122 ret = -EINVAL;
2123 goto out;
2124 }
Johannes Berg8cb08172019-04-26 14:07:28 +02002125 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2126 attr,
2127 nbd_sock_policy,
2128 info->extack);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002129 if (ret != 0) {
2130 printk(KERN_ERR "nbd: error processing sock list\n");
2131 ret = -EINVAL;
2132 goto out;
2133 }
2134 if (!socks[NBD_SOCK_FD])
2135 continue;
2136 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2137 ret = nbd_reconnect_socket(nbd, fd);
2138 if (ret) {
2139 if (ret == -ENOSPC)
2140 ret = 0;
2141 goto out;
2142 }
2143 dev_info(nbd_to_dev(nbd), "reconnected socket\n");
2144 }
2145 }
2146out:
2147 mutex_unlock(&nbd->config_lock);
2148 nbd_config_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04002149 nbd_put(nbd);
Josef Bacika2c97902017-04-06 17:02:07 -04002150 if (put_dev)
2151 nbd_put(nbd);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002152 return ret;
2153}
2154
Josef Bacike46c7282017-04-06 17:02:00 -04002155static const struct genl_ops nbd_connect_genl_ops[] = {
2156 {
2157 .cmd = NBD_CMD_CONNECT,
Johannes Bergef6243a2019-04-26 14:07:31 +02002158 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
Josef Bacike46c7282017-04-06 17:02:00 -04002159 .doit = nbd_genl_connect,
2160 },
2161 {
2162 .cmd = NBD_CMD_DISCONNECT,
Johannes Bergef6243a2019-04-26 14:07:31 +02002163 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
Josef Bacike46c7282017-04-06 17:02:00 -04002164 .doit = nbd_genl_disconnect,
2165 },
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002166 {
2167 .cmd = NBD_CMD_RECONFIGURE,
Johannes Bergef6243a2019-04-26 14:07:31 +02002168 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002169 .doit = nbd_genl_reconfigure,
2170 },
Josef Bacik47d902b2017-04-06 17:02:05 -04002171 {
2172 .cmd = NBD_CMD_STATUS,
Johannes Bergef6243a2019-04-26 14:07:31 +02002173 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
Josef Bacik47d902b2017-04-06 17:02:05 -04002174 .doit = nbd_genl_status,
2175 },
Josef Bacike46c7282017-04-06 17:02:00 -04002176};
2177
Josef Bacik799f9a32017-04-06 17:02:02 -04002178static const struct genl_multicast_group nbd_mcast_grps[] = {
2179 { .name = NBD_GENL_MCAST_GROUP_NAME, },
2180};
2181
Josef Bacike46c7282017-04-06 17:02:00 -04002182static struct genl_family nbd_genl_family __ro_after_init = {
2183 .hdrsize = 0,
2184 .name = NBD_GENL_FAMILY_NAME,
2185 .version = NBD_GENL_VERSION,
2186 .module = THIS_MODULE,
2187 .ops = nbd_connect_genl_ops,
2188 .n_ops = ARRAY_SIZE(nbd_connect_genl_ops),
2189 .maxattr = NBD_ATTR_MAX,
Johannes Berg3b0f31f2019-03-21 22:51:02 +01002190 .policy = nbd_attr_policy,
Josef Bacik799f9a32017-04-06 17:02:02 -04002191 .mcgrps = nbd_mcast_grps,
2192 .n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
Josef Bacike46c7282017-04-06 17:02:00 -04002193};
2194
Josef Bacik47d902b2017-04-06 17:02:05 -04002195static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
2196{
2197 struct nlattr *dev_opt;
2198 u8 connected = 0;
2199 int ret;
2200
2201 /* This is a little racy, but for status it's OK. The
2202 * reason we don't take a ref here is that we can't in the
2203 * index == -1 case: the matching put would then happen
2204 * under the nbd_index_mutex, which could deadlock if we
2205 * are configured to remove ourselves once we're
2206 * disconnected.
2207 */
2208 if (refcount_read(&nbd->config_refs))
2209 connected = 1;
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002210 dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
Josef Bacik47d902b2017-04-06 17:02:05 -04002211 if (!dev_opt)
2212 return -EMSGSIZE;
2213 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
2214 if (ret)
2215 return -EMSGSIZE;
2216 ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
2217 connected);
2218 if (ret)
2219 return -EMSGSIZE;
2220 nla_nest_end(reply, dev_opt);
2221 return 0;
2222}
2223
2224static int status_cb(int id, void *ptr, void *data)
2225{
2226 struct nbd_device *nbd = ptr;
2227 return populate_nbd_status(nbd, (struct sk_buff *)data);
2228}
2229
2230static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
2231{
2232 struct nlattr *dev_list;
2233 struct sk_buff *reply;
2234 void *reply_head;
2235 size_t msg_size;
2236 int index = -1;
2237 int ret = -ENOMEM;
2238
2239 if (info->attrs[NBD_ATTR_INDEX])
2240 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2241
2242 mutex_lock(&nbd_index_mutex);
2243
2244 msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
2245 nla_attr_size(sizeof(u8)));
2246 msg_size *= (index == -1) ? nbd_total_devices : 1;
2247
2248 reply = genlmsg_new(msg_size, GFP_KERNEL);
2249 if (!reply)
2250 goto out;
2251 reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
2252 NBD_CMD_STATUS);
2253 if (!reply_head) {
2254 nlmsg_free(reply);
2255 goto out;
2256 }
2257
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002258 dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
Josef Bacik47d902b2017-04-06 17:02:05 -04002259 if (index == -1) {
2260 ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
2261 if (ret) {
2262 nlmsg_free(reply);
2263 goto out;
2264 }
2265 } else {
2266 struct nbd_device *nbd;
2267 nbd = idr_find(&nbd_index_idr, index);
2268 if (nbd) {
2269 ret = populate_nbd_status(nbd, reply);
2270 if (ret) {
2271 nlmsg_free(reply);
2272 goto out;
2273 }
2274 }
2275 }
2276 nla_nest_end(reply, dev_list);
2277 genlmsg_end(reply, reply_head);
Li RongQingcd46eb82019-02-19 13:14:07 +08002278 ret = genlmsg_reply(reply, info);
Josef Bacik47d902b2017-04-06 17:02:05 -04002279out:
2280 mutex_unlock(&nbd_index_mutex);
2281 return ret;
2282}
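/*
 * Reply layout produced above (one DEVICE_ITEM per device when no
 * NBD_ATTR_INDEX is supplied, otherwise just the matching device):
 *
 *	NBD_ATTR_DEVICE_LIST (nested)
 *	    NBD_DEVICE_ITEM (nested)
 *		NBD_DEVICE_INDEX	(u32)
 *		NBD_DEVICE_CONNECTED	(u8)
 *	    ...
 */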
2283
Josef Bacike46c7282017-04-06 17:02:00 -04002284static void nbd_connect_reply(struct genl_info *info, int index)
2285{
2286 struct sk_buff *skb;
2287 void *msg_head;
2288 int ret;
2289
2290 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2291 if (!skb)
2292 return;
2293 msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
2294 NBD_CMD_CONNECT);
2295 if (!msg_head) {
2296 nlmsg_free(skb);
2297 return;
2298 }
2299 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2300 if (ret) {
2301 nlmsg_free(skb);
2302 return;
2303 }
2304 genlmsg_end(skb, msg_head);
2305 genlmsg_reply(skb, info);
2306}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307
Josef Bacik799f9a32017-04-06 17:02:02 -04002308static void nbd_mcast_index(int index)
2309{
2310 struct sk_buff *skb;
2311 void *msg_head;
2312 int ret;
2313
2314 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2315 if (!skb)
2316 return;
2317 msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
2318 NBD_CMD_LINK_DEAD);
2319 if (!msg_head) {
2320 nlmsg_free(skb);
2321 return;
2322 }
2323 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2324 if (ret) {
2325 nlmsg_free(skb);
2326 return;
2327 }
2328 genlmsg_end(skb, msg_head);
2329 genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
2330}
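/*
 * Userspace can learn about dead links by joining the family's multicast
 * group and watching for NBD_CMD_LINK_DEAD messages carrying
 * NBD_ATTR_INDEX. Hedged libnl-3 fragment:
 *
 *	grp = genl_ctrl_resolve_grp(nl, NBD_GENL_FAMILY_NAME,
 *				    NBD_GENL_MCAST_GROUP_NAME);
 *	nl_socket_add_membership(nl, grp);
 */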
2331
2332static void nbd_dead_link_work(struct work_struct *work)
2333{
2334 struct link_dead_args *args = container_of(work, struct link_dead_args,
2335 work);
2336 nbd_mcast_index(args->index);
2337 kfree(args);
2338}
2339
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340static int __init nbd_init(void)
2341{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 int i;
2343
Adrian Bunk5b7b18c2006-03-25 03:07:04 -08002344 BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002346 if (max_part < 0) {
WANG Cong7742ce42011-08-19 14:48:28 +02002347 printk(KERN_ERR "nbd: max_part must be >= 0\n");
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002348 return -EINVAL;
2349 }
2350
2351 part_shift = 0;
Namhyung Kim5988ce22011-05-28 14:44:46 +02002352 if (max_part > 0) {
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002353 part_shift = fls(max_part);
2354
Namhyung Kim5988ce22011-05-28 14:44:46 +02002355 /*
2356 * Adjust max_part according to part_shift as it is exported
2357 * to user space so that user can know the max number of
2358 * partition kernel should be able to manage.
2359 *
2360 * Note that -1 is required because partition 0 is reserved
2361 * for the whole disk.
2362 */
2363 max_part = (1UL << part_shift) - 1;
2364 }
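	/*
	 * Worked example: with the default max_part = 16, part_shift =
	 * fls(16) = 5 and max_part becomes (1 << 5) - 1 = 31, i.e. 32
	 * minors per device (partition 0 is the whole disk), so the checks
	 * below cap nbds_max at 1UL << (MINORBITS - 5).
	 */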
2365
Namhyung Kim3b271082011-05-28 14:44:46 +02002366 if ((1UL << part_shift) > DISK_MAX_PARTS)
2367 return -EINVAL;
2368
2369 if (nbds_max > 1UL << (MINORBITS - part_shift))
2370 return -EINVAL;
2371
Mike Christiee9e006f2019-08-04 14:10:06 -05002372 if (register_blkdev(NBD_MAJOR, "nbd"))
Josef Bacikb0d91112017-02-01 16:11:40 -05002373 return -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374
Josef Bacike46c7282017-04-06 17:02:00 -04002375 if (genl_register_family(&nbd_genl_family)) {
2376 unregister_blkdev(NBD_MAJOR, "nbd");
Josef Bacike46c7282017-04-06 17:02:00 -04002377 return -EINVAL;
2378 }
Markus Pargmann30d53d92015-08-17 08:20:06 +02002379 nbd_dbg_init();
2380
Josef Bacikb0d91112017-02-01 16:11:40 -05002381 mutex_lock(&nbd_index_mutex);
2382 for (i = 0; i < nbds_max; i++)
2383 nbd_dev_add(i);
2384 mutex_unlock(&nbd_index_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385 return 0;
Josef Bacikb0d91112017-02-01 16:11:40 -05002386}
2387
2388static int nbd_exit_cb(int id, void *ptr, void *data)
2389{
Josef Bacikc6a47592017-04-06 17:02:06 -04002390 struct list_head *list = (struct list_head *)data;
Josef Bacikb0d91112017-02-01 16:11:40 -05002391 struct nbd_device *nbd = ptr;
Josef Bacikc6a47592017-04-06 17:02:06 -04002392
Josef Bacikc6a47592017-04-06 17:02:06 -04002393 list_add_tail(&nbd->list, list);
Josef Bacikb0d91112017-02-01 16:11:40 -05002394 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395}
2396
2397static void __exit nbd_cleanup(void)
2398{
Josef Bacikc6a47592017-04-06 17:02:06 -04002399 struct nbd_device *nbd;
2400 LIST_HEAD(del_list);
2401
Markus Pargmann30d53d92015-08-17 08:20:06 +02002402 nbd_dbg_close();
2403
Josef Bacikc6a47592017-04-06 17:02:06 -04002404 mutex_lock(&nbd_index_mutex);
2405 idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
2406 mutex_unlock(&nbd_index_mutex);
2407
Josef Bacik60ae36a2017-04-28 09:49:19 -04002408 while (!list_empty(&del_list)) {
2409 nbd = list_first_entry(&del_list, struct nbd_device, list);
2410 list_del_init(&nbd->list);
2411 if (refcount_read(&nbd->refs) != 1)
Josef Bacikc6a47592017-04-06 17:02:06 -04002412 printk(KERN_ERR "nbd: possibly leaking a device\n");
2413 nbd_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04002414 }
2415
Josef Bacikb0d91112017-02-01 16:11:40 -05002416 idr_destroy(&nbd_index_idr);
Josef Bacike46c7282017-04-06 17:02:00 -04002417 genl_unregister_family(&nbd_genl_family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 unregister_blkdev(NBD_MAJOR, "nbd");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419}
2420
2421module_init(nbd_init);
2422module_exit(nbd_cleanup);
2423
2424MODULE_DESCRIPTION("Network Block Device");
2425MODULE_LICENSE("GPL");
2426
Lars Marowsky-Bree40be0c22005-05-01 08:59:07 -07002427module_param(nbds_max, int, 0444);
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002428MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
2429module_param(max_part, int, 0444);
Josef Bacik7a8362a2017-08-14 18:56:16 +00002430MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");