// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;

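/*
 * Per-connection state. tx_lock serializes sends on the socket;
 * pending/sent record a partially transmitted request so a later call
 * can resume it; cookie is bumped on every reconnect so stale replies
 * and timeouts can tell an old socket from its replacement.
 */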
struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_HAS_PID_FILE		3
#define NBD_HAS_CONFIG_REF		4
#define NBD_BOUND			5
#define NBD_DESTROY_ON_DISCONNECT	6
#define NBD_DISCONNECT_ON_CLOSE		7

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;
};

#define NBD_CMD_REQUEUED	1

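/*
 * Per-request state, stored in the blk-mq PDU. cmd_cookie is folded into
 * the wire handle (see nbd_cmd_handle()) so a late reply for an earlier
 * incarnation of the same tag can be detected and rejected.
 */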
struct nbd_cmd {
	struct nbd_device *nbd;
	struct mutex lock;
	int index;
	int cookie;
	blk_status_t status;
	unsigned long flags;
	u32 cmd_cookie;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

#define NBD_DEF_BLKSIZE 1024

static unsigned int nbds_max = 16;
static int max_part = 16;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
		blk_mq_requeue_request(req, true);
}

#define NBD_COOKIE_BITS 32

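/*
 * The 64-bit handle sent on the wire packs the per-command cookie into
 * the upper 32 bits and the blk-mq unique tag into the lower 32 bits:
 *
 *	handle = (cmd_cookie << NBD_COOKIE_BITS) | blk_mq_unique_tag(req)
 *
 * nbd_handle_to_tag() and nbd_handle_to_cookie() below undo the split.
 */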
static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	u32 tag = blk_mq_unique_tag(req);
	u64 cookie = cmd->cmd_cookie;

	return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
	return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> NBD_COOKIE_BITS);
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = 0444},
	.show = pid_show,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	struct request_queue *q;

	if (disk) {
		q = disk->queue;
		del_gendisk(disk);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&nbd->tag_set);
		disk->private_data = NULL;
		put_disk(disk);
	}
	kfree(nbd);
}

static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		mutex_unlock(&nbd_index_mutex);
		nbd_dev_remove(nbd);
	}
}

static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
}

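/*
 * Mark a socket dead and reset its partial-send state. If @notify is set
 * and the device has not been disconnected, queue a link_dead_args work
 * item so nbd_dead_link_work() can handle the dead link asynchronously.
 * Called with the socket's tx_lock held.
 */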
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		if (atomic_dec_return(&nbd->config->live_connections) == 0) {
			if (test_and_clear_bit(NBD_DISCONNECT_REQUESTED,
					       &nbd->config->runtime_flags)) {
				set_bit(NBD_DISCONNECTED,
					&nbd->config->runtime_flags);
				dev_info(nbd_to_dev(nbd),
					 "Disconnected due to user request.\n");
			}
		}
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}

static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}

static void nbd_size_update(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct block_device *bdev = bdget_disk(nbd->disk, 0);

	if (config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = config->blksize;
		nbd->disk->queue->limits.discard_alignment = config->blksize;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
	}
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, config->bytesize >> 9);
	if (bdev) {
		if (bdev->bd_disk) {
			bd_set_size(bdev, config->bytesize);
			set_blocksize(bdev, config->blksize);
		} else
			bdev->bd_invalidated = 1;
		bdput(bdev);
	}
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	if (nbd->task_recv != NULL)
		nbd_size_update(nbd);
}

static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

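/* Map a block layer operation to the matching NBD wire command. */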
static u32 req_to_nbd_cmd_type(struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		return NBD_CMD_TRIM;
	case REQ_OP_FLUSH:
		return NBD_CMD_FLUSH;
	case REQ_OP_WRITE:
		return NBD_CMD_WRITE;
	case REQ_OP_READ:
		return NBD_CMD_READ;
	default:
		return U32_MAX;
	}
}

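/*
 * blk-mq timeout handler. With more than one connection the command is
 * requeued so the submit path can retry it on another socket; otherwise
 * the sockets are shut down and the request is completed with an error.
 */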
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		goto done;
	}
	config = nbd->config;

	if (!mutex_trylock(&cmd->lock)) {
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}

	if (config->num_connections > 1) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying (%d/%d alive)\n",
				    atomic_read(&config->live_connections),
				    config->num_connections);
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection.
		 */
		if (config->socks && config->num_connections > 1) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			mutex_unlock(&cmd->lock);
			nbd_requeue_cmd(cmd);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
	} else {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out\n");
	}
	set_bit(NBD_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);
	sock_shutdown(nbd);
	nbd_config_put(nbd);
done:
	blk_mq_complete_request(req);
	return BLK_EH_DONE;
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}

/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}

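/*
 * Transmit the request header and, for writes, the bio payload for @cmd
 * on the socket at @index. A send interrupted by a signal is parked in
 * nsock->pending/nsock->sent and resumed by a later call.
 */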
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u64 handle;
	u32 type;
	u32 nbd_cmd_flags = 0;
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));

	type = req_to_nbd_cmd_type(req);
	if (type == U32_MAX)
		return -EIO;

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);

			/* initialize handle for tracing purposes */
			handle = nbd_cmd_handle(cmd);

			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	} else {
		cmd->cmd_cookie++;
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	handle = nbd_cmd_handle(cmd);
	memcpy(request.handle, &handle, sizeof(handle));

	trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	trace_nbd_header_sent(req, handle);
	if (result <= 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	trace_nbd_payload_sent(req, handle);
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}

/* An ERR_PTR return = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u64 handle;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;
	int ret = 0;

	reply.magic = 0;
	iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&handle, reply.handle, sizeof(handle));
	tag = nbd_handle_to_tag(handle);
	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	trace_nbd_header_received(req, handle);
	cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
		ret = -ENOENT;
		goto out;
	}
	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		goto out;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected or we only have 1
				 * connection then we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config) ||
				    config->num_connections <= 1) {
					cmd->status = BLK_STS_IOERR;
					goto out;
				}
				ret = -EIO;
				goto out;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
out:
	trace_nbd_payload_received(req, handle);
	mutex_unlock(&cmd->lock);
	return ret ? ERR_PTR(ret) : cmd;
}

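/*
 * Receive worker, one per connection: complete requests as replies come
 * in until the socket errors out, then mark the socket dead and exit.
 */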
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
	}
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	nbd_config_put(nbd);
	kfree(args);
}

static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	cmd->status = BLK_STS_IOERR;
	blk_mq_complete_request(req);
	return true;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

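/*
 * Pick a live socket to retry on after the one at @index died. The result
 * is cached in nsock->fallback_index; -1 means no live socket was found.
 */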
static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}

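/*
 * Wait for a connection to come back or for the dead connection timeout
 * to expire. Returns nonzero if at least one connection is live again.
 */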
static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return 0;
	return wait_event_timeout(config->conn_wait,
				  atomic_read(&config->live_connections) > 0,
				  config->dead_conn_timeout) > 0;
}

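/*
 * Submission path for one command: pin the config, pick a live socket
 * (falling back or waiting for a reconnect if necessary) and hand the
 * command to nbd_send_cmd() under the socket's tx_lock.
 */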
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		blk_mq_start_request(req);
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		blk_mq_start_request(req);
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			blk_mq_start_request(req);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		nbd_requeue_cmd(cmd);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, requeueing\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		nbd_requeue_cmd(cmd);
		ret = 0;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}

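/* blk-mq ->queue_rq() hook for the nbd tag set. */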
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	mutex_lock(&cmd->lock);
	clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_STS_IOERR;
	else if (!ret)
		ret = BLK_STS_OK;
	mutex_unlock(&cmd->lock);

	return ret;
}

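/*
 * Add a connection from a user-supplied socket fd. On the ioctl path
 * (@netlink false) only the task that began the setup may add further
 * sockets; the netlink path is not bound to a single task.
 */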
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		sockfd_put(sock);
		return -EBUSY;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		sockfd_put(sock);
		return -ENOMEM;
	}
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);

	return 0;
}

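/*
 * Replace the first dead connection with a user-supplied socket and kick
 * off a new receive worker for it. Returns -ENOSPC if no connection was
 * dead.
 */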
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_mutex in an error path in the recv_work, so we
		 * need to queue_work outside of the tx_mutex.
		 */
		queue_work(recv_workqueue, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_size(bdev, 0);
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	}
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}

static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
	send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}

Josef Bacik5ea8d102017-04-06 17:01:58 -04001122static void nbd_config_put(struct nbd_device *nbd)
1123{
1124 if (refcount_dec_and_mutex_lock(&nbd->config_refs,
1125 &nbd->config_lock)) {
Josef Bacik5ea8d102017-04-06 17:01:58 -04001126 struct nbd_config *config = nbd->config;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001127 nbd_dev_dbg_close(nbd);
Josef Bacik29eaadc2017-04-06 17:01:59 -04001128 nbd_size_clear(nbd);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001129 if (test_and_clear_bit(NBD_HAS_PID_FILE,
1130 &config->runtime_flags))
1131 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
1132 nbd->task_recv = NULL;
Josef Bacik29eaadc2017-04-06 17:01:59 -04001133 nbd_clear_sock(nbd);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001134 if (config->num_connections) {
1135 int i;
1136 for (i = 0; i < config->num_connections; i++) {
1137 sockfd_put(config->socks[i]->sock);
1138 kfree(config->socks[i]);
1139 }
1140 kfree(config->socks);
1141 }
Ilya Dryomovfa976532017-05-23 17:49:55 +02001142 kfree(nbd->config);
Ilya Dryomovaf622b82017-05-23 17:49:54 +02001143 nbd->config = NULL;
1144
1145 nbd->tag_set.timeout = 0;
Josef Bacik6df133a2018-05-23 13:35:59 -04001146 nbd->disk->queue->limits.discard_granularity = 0;
Josef Bacik07ce2132018-06-05 11:41:23 -04001147 nbd->disk->queue->limits.discard_alignment = 0;
Josef Bacik6df133a2018-05-23 13:35:59 -04001148 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
Bart Van Assche8b904b52018-03-07 17:10:10 -08001149 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
Josef Bacika2c97902017-04-06 17:02:07 -04001150
Josef Bacik5ea8d102017-04-06 17:01:58 -04001151 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001152 nbd_put(nbd);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001153 module_put(THIS_MODULE);
1154 }
1155}
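
/*
 * Two refcounts are in play here: nbd->refs pins the device itself
 * (gendisk, tag set), while nbd->config_refs pins the live
 * configuration.  Every recv thread and every netlink-bound device
 * holds a config ref, so the teardown above runs only after the last
 * socket user has gone away.
 */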
1156
Josef Bacike46c7282017-04-06 17:02:00 -04001157static int nbd_start_device(struct nbd_device *nbd)
Josef Bacik9442b732017-02-07 17:10:22 -05001158{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001159 struct nbd_config *config = nbd->config;
1160 int num_connections = config->num_connections;
Josef Bacik9442b732017-02-07 17:10:22 -05001161 int error = 0, i;
1162
1163 if (nbd->task_recv)
1164 return -EBUSY;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001165 if (!config->socks)
Josef Bacik9442b732017-02-07 17:10:22 -05001166 return -EINVAL;
1167 if (num_connections > 1 &&
Josef Bacik5ea8d102017-04-06 17:01:58 -04001168 !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
Josef Bacik9442b732017-02-07 17:10:22 -05001169 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
Josef Bacik5ea8d102017-04-06 17:01:58 -04001170 return -EINVAL;
Josef Bacik9442b732017-02-07 17:10:22 -05001171 }
1172
Josef Bacik5ea8d102017-04-06 17:01:58 -04001173 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
Josef Bacik9442b732017-02-07 17:10:22 -05001174 nbd->task_recv = current;
Josef Bacik9442b732017-02-07 17:10:22 -05001175
Josef Bacik29eaadc2017-04-06 17:01:59 -04001176 nbd_parse_flags(nbd);
Josef Bacik9442b732017-02-07 17:10:22 -05001177
1178 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
1179 if (error) {
1180 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
Josef Bacik5ea8d102017-04-06 17:01:58 -04001181 return error;
Josef Bacik9442b732017-02-07 17:10:22 -05001182 }
Josef Bacik29eaadc2017-04-06 17:01:59 -04001183 set_bit(NBD_HAS_PID_FILE, &config->runtime_flags);
Josef Bacik9442b732017-02-07 17:10:22 -05001184
1185 nbd_dev_dbg_init(nbd);
1186 for (i = 0; i < num_connections; i++) {
Josef Bacik5ea8d102017-04-06 17:01:58 -04001187 struct recv_thread_args *args;
1188
1189 args = kzalloc(sizeof(*args), GFP_KERNEL);
1190 if (!args) {
1191 sock_shutdown(nbd);
1192 return -ENOMEM;
1193 }
1194 sk_set_memalloc(config->socks[i]->sock->sk);
Josef Bacika7ee8cf2017-07-21 10:48:15 -04001195 if (nbd->tag_set.timeout)
1196 config->socks[i]->sock->sk->sk_sndtimeo =
1197 nbd->tag_set.timeout;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001198 atomic_inc(&config->recv_threads);
1199 refcount_inc(&nbd->config_refs);
1200 INIT_WORK(&args->work, recv_work);
1201 args->nbd = nbd;
1202 args->index = i;
1203 queue_work(recv_workqueue, &args->work);
Josef Bacik9442b732017-02-07 17:10:22 -05001204 }
Josef Bacik639812a2017-10-09 13:12:10 -04001205 nbd_size_update(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001206 return error;
1207}
1208
1209static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
1210{
1211 struct nbd_config *config = nbd->config;
1212 int ret;
1213
1214 ret = nbd_start_device(nbd);
1215 if (ret)
1216 return ret;
1217
Josef Bacike46c7282017-04-06 17:02:00 -04001218 if (max_part)
1219 bdev->bd_invalidated = 1;
1220 mutex_unlock(&nbd->config_lock);
1221 ret = wait_event_interruptible(config->recv_wq,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001222 atomic_read(&config->recv_threads) == 0);
Josef Bacike46c7282017-04-06 17:02:00 -04001223 if (ret)
Josef Bacik5ea8d102017-04-06 17:01:58 -04001224 sock_shutdown(nbd);
Josef Bacik9442b732017-02-07 17:10:22 -05001225 mutex_lock(&nbd->config_lock);
Josef Bacik76aa1d32018-05-16 14:51:22 -04001226 nbd_bdev_reset(bdev);
Josef Bacik9442b732017-02-07 17:10:22 -05001227 /* user requested, ignore socket errors */
Josef Bacik5ea8d102017-04-06 17:01:58 -04001228 if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
Josef Bacike46c7282017-04-06 17:02:00 -04001229 ret = 0;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001230 if (test_bit(NBD_TIMEDOUT, &config->runtime_flags))
Josef Bacike46c7282017-04-06 17:02:00 -04001231 ret = -ETIMEDOUT;
1232 return ret;
Josef Bacik9442b732017-02-07 17:10:22 -05001233}
Markus Pargmann30d53d92015-08-17 08:20:06 +02001234
Josef Bacik29eaadc2017-04-06 17:01:59 -04001235static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
1236 struct block_device *bdev)
1237{
Josef Bacik2516ab12017-04-06 17:02:03 -04001238 sock_shutdown(nbd);
Munehisa Kamata2b5c8f02019-07-31 20:13:10 +08001239 __invalidate_device(bdev, true);
Josef Bacik29eaadc2017-04-06 17:01:59 -04001240 nbd_bdev_reset(bdev);
Josef Bacike46c7282017-04-06 17:02:00 -04001241 if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
1242 &nbd->config->runtime_flags))
1243 nbd_config_put(nbd);
Josef Bacik29eaadc2017-04-06 17:01:59 -04001244}
1245
Xiubo Li553768d2019-05-29 15:16:05 -05001246static bool nbd_is_valid_blksize(unsigned long blksize)
1247{
1248 if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
1249 blksize > PAGE_SIZE)
1250 return false;
1251 return true;
1252}
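
/*
 * On a 4K PAGE_SIZE build this accepts exactly 512, 1024, 2048 and 4096
 * bytes; zero, non-power-of-two, and out-of-range sizes are rejected.
 */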
1253
Mike Christie55313e92019-08-13 11:39:49 -05001254static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
1255{
1256 nbd->tag_set.timeout = timeout * HZ;
1257 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1258}
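
/*
 * The timeout argument is in seconds; both the tag set and the request
 * queue store it converted to jiffies.
 */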
1259
Josef Bacik9561a7a2016-11-22 14:04:40 -05001260/* Must be called with config_lock held */
Wanlong Gaof4507162012-03-28 14:42:51 -07001261static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
Pavel Machek1a2ad212009-04-02 16:58:41 -07001262 unsigned int cmd, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001264 struct nbd_config *config = nbd->config;
1265
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266 switch (cmd) {
Josef Bacik9442b732017-02-07 17:10:22 -05001267 case NBD_DISCONNECT:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001268 return nbd_disconnect(nbd);
Markus Pargmann23272a672015-10-29 11:51:16 +01001269 case NBD_CLEAR_SOCK:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001270 nbd_clear_sock_ioctl(nbd, bdev);
1271 return 0;
Josef Bacik9442b732017-02-07 17:10:22 -05001272 case NBD_SET_SOCK:
Josef Bacike46c7282017-04-06 17:02:00 -04001273 return nbd_add_socket(nbd, arg, false);
Josef Bacik9442b732017-02-07 17:10:22 -05001274 case NBD_SET_BLKSIZE:
Xiubo Li553768d2019-05-29 15:16:05 -05001275 if (!arg)
1276 arg = NBD_DEF_BLKSIZE;
1277 if (!nbd_is_valid_blksize(arg))
Jens Axboebc811f02018-09-04 11:52:34 -06001278 return -EINVAL;
Josef Bacik29eaadc2017-04-06 17:01:59 -04001279 nbd_size_set(nbd, arg,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001280 div_s64(config->bytesize, arg));
Josef Bacike5445412017-02-13 10:39:47 -05001281 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282 case NBD_SET_SIZE:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001283 nbd_size_set(nbd, config->blksize,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001284 div_s64(arg, config->blksize));
Josef Bacike5445412017-02-13 10:39:47 -05001285 return 0;
Markus Pargmann37091fd2015-07-27 07:36:49 +02001286 case NBD_SET_SIZE_BLOCKS:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001287 nbd_size_set(nbd, config->blksize, arg);
Josef Bacike5445412017-02-13 10:39:47 -05001288 return 0;
Paul Clements7fdfd402007-10-16 23:27:37 -07001289 case NBD_SET_TIMEOUT:
Mike Christie55313e92019-08-13 11:39:49 -05001290 if (arg)
1291 nbd_set_cmd_timeout(nbd, arg);
Paul Clements7fdfd402007-10-16 23:27:37 -07001292 return 0;
Pavel Machek1a2ad212009-04-02 16:58:41 -07001293
Paul Clements2f012502012-10-04 17:16:15 -07001294 case NBD_SET_FLAGS:
Josef Bacik5ea8d102017-04-06 17:01:58 -04001295 config->flags = arg;
Paul Clements2f012502012-10-04 17:16:15 -07001296 return 0;
Josef Bacik9442b732017-02-07 17:10:22 -05001297 case NBD_DO_IT:
Josef Bacike46c7282017-04-06 17:02:00 -04001298 return nbd_start_device_ioctl(nbd, bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299 case NBD_CLEAR_QUE:
Herbert Xu4b2f0262006-01-06 00:09:47 -08001300 /*
1301 * This is for compatibility only. The queue is always cleared
1302 * by NBD_DO_IT or NBD_CLEAR_SOCK.
1303 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 return 0;
1305 case NBD_PRINT_DEBUG:
Josef Bacikfd8383f2016-09-08 12:33:37 -07001306 /*
1307 * For compatibility only, we no longer keep a list of
1308 * outstanding requests.
1309 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 return 0;
1311 }
Pavel Machek1a2ad212009-04-02 16:58:41 -07001312 return -ENOTTY;
1313}
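
/*
 * A minimal legacy-client flow, as a hypothetical userspace sketch
 * (error handling omitted; sock_fd and export_bytes are placeholders):
 * hand a connected socket to the kernel, describe the export, then
 * block in NBD_DO_IT until disconnect:
 *
 *	int nbd_fd = open("/dev/nbd0", O_RDWR);
 *	ioctl(nbd_fd, NBD_SET_SOCK, sock_fd);
 *	ioctl(nbd_fd, NBD_SET_BLKSIZE, 4096);
 *	ioctl(nbd_fd, NBD_SET_SIZE, export_bytes);
 *	ioctl(nbd_fd, NBD_DO_IT);	// blocks until disconnect/timeout
 */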
1314
1315static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
1316 unsigned int cmd, unsigned long arg)
1317{
Wanlong Gaof4507162012-03-28 14:42:51 -07001318 struct nbd_device *nbd = bdev->bd_disk->private_data;
Josef Bacike46c7282017-04-06 17:02:00 -04001319 struct nbd_config *config = nbd->config;
1320 int error = -EINVAL;
Pavel Machek1a2ad212009-04-02 16:58:41 -07001321
1322 if (!capable(CAP_SYS_ADMIN))
1323 return -EPERM;
1324
Josef Bacik1dae69b2017-05-05 22:25:18 -04001325 /* The block layer will pass back some non-nbd ioctls in case we have
1326 	 * special handling for them, but we don't, so just return an error.
1327 */
1328 if (_IOC_TYPE(cmd) != 0xab)
1329 return -EINVAL;
1330
Josef Bacik9561a7a2016-11-22 14:04:40 -05001331 mutex_lock(&nbd->config_lock);
Josef Bacike46c7282017-04-06 17:02:00 -04001332
1333 	/* Don't allow ioctl operations on an nbd device that was created with
1334 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
1335 */
1336 if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
1337 (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
1338 error = __nbd_ioctl(bdev, nbd, cmd, arg);
1339 else
1340 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
Josef Bacik9561a7a2016-11-22 14:04:40 -05001341 mutex_unlock(&nbd->config_lock);
Pavel Machek1a2ad212009-04-02 16:58:41 -07001342 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343}
1344
Josef Bacik5ea8d102017-04-06 17:01:58 -04001345static struct nbd_config *nbd_alloc_config(void)
1346{
1347 struct nbd_config *config;
1348
1349 config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
1350 if (!config)
1351 return NULL;
1352 atomic_set(&config->recv_threads, 0);
1353 init_waitqueue_head(&config->recv_wq);
Josef Bacik560bc4b2017-04-06 17:02:04 -04001354 init_waitqueue_head(&config->conn_wait);
Xiubo Li553768d2019-05-29 15:16:05 -05001355 config->blksize = NBD_DEF_BLKSIZE;
Josef Bacik560bc4b2017-04-06 17:02:04 -04001356 atomic_set(&config->live_connections, 0);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001357 try_module_get(THIS_MODULE);
1358 return config;
1359}
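
/*
 * GFP_NOFS (together with sk_set_memalloc() on the data sockets) keeps
 * these allocations from recursing into a filesystem that may itself be
 * backed by this nbd device.
 */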
1360
1361static int nbd_open(struct block_device *bdev, fmode_t mode)
1362{
1363 struct nbd_device *nbd;
1364 int ret = 0;
1365
1366 mutex_lock(&nbd_index_mutex);
1367 nbd = bdev->bd_disk->private_data;
1368 if (!nbd) {
1369 ret = -ENXIO;
1370 goto out;
1371 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001372 if (!refcount_inc_not_zero(&nbd->refs)) {
1373 ret = -ENXIO;
1374 goto out;
1375 }
Josef Bacik5ea8d102017-04-06 17:01:58 -04001376 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1377 struct nbd_config *config;
1378
1379 mutex_lock(&nbd->config_lock);
1380 if (refcount_inc_not_zero(&nbd->config_refs)) {
1381 mutex_unlock(&nbd->config_lock);
1382 goto out;
1383 }
1384 config = nbd->config = nbd_alloc_config();
1385 if (!config) {
1386 ret = -ENOMEM;
1387 mutex_unlock(&nbd->config_lock);
1388 goto out;
1389 }
1390 refcount_set(&nbd->config_refs, 1);
Josef Bacikc6a47592017-04-06 17:02:06 -04001391 refcount_inc(&nbd->refs);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001392 mutex_unlock(&nbd->config_lock);
Josef Bacikfe1f9e62018-05-16 14:51:21 -04001393 bdev->bd_invalidated = 1;
1394 } else if (nbd_disconnected(nbd->config)) {
1395 bdev->bd_invalidated = 1;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001396 }
1397out:
1398 mutex_unlock(&nbd_index_mutex);
1399 return ret;
1400}
1401
1402static void nbd_release(struct gendisk *disk, fmode_t mode)
1403{
1404 struct nbd_device *nbd = disk->private_data;
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001405 struct block_device *bdev = bdget_disk(disk, 0);
1406
1407 if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
1408 bdev->bd_openers == 0)
1409 		nbd_disconnect_and_put(nbd);
	/* drop the bdev reference taken by bdget_disk() above; without
	 * this, every open/release cycle leaks a reference */
	bdput(bdev);
1410 
Josef Bacik5ea8d102017-04-06 17:01:58 -04001411 nbd_config_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04001412 nbd_put(nbd);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001413}
1414
Alexey Dobriyan83d5cde2009-09-21 17:01:13 -07001415static const struct block_device_operations nbd_fops =
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416{
1417 .owner = THIS_MODULE,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001418 .open = nbd_open,
1419 .release = nbd_release,
Arnd Bergmann8a6cfeb2010-07-08 10:18:46 +02001420 .ioctl = nbd_ioctl,
Al Viro263a3df2016-01-07 10:04:37 -05001421 .compat_ioctl = nbd_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422};
1423
Markus Pargmann30d53d92015-08-17 08:20:06 +02001424#if IS_ENABLED(CONFIG_DEBUG_FS)
1425
1426static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
1427{
1428 struct nbd_device *nbd = s->private;
1429
1430 if (nbd->task_recv)
1431 seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
Markus Pargmann30d53d92015-08-17 08:20:06 +02001432
1433 return 0;
1434}
1435
1436static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
1437{
1438 return single_open(file, nbd_dbg_tasks_show, inode->i_private);
1439}
1440
1441static const struct file_operations nbd_dbg_tasks_ops = {
1442 .open = nbd_dbg_tasks_open,
1443 .read = seq_read,
1444 .llseek = seq_lseek,
1445 .release = single_release,
1446};
1447
1448static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1449{
1450 struct nbd_device *nbd = s->private;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001451 u32 flags = nbd->config->flags;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001452
1453 seq_printf(s, "Hex: 0x%08x\n\n", flags);
1454
1455 seq_puts(s, "Known flags:\n");
1456
1457 if (flags & NBD_FLAG_HAS_FLAGS)
1458 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
1459 if (flags & NBD_FLAG_READ_ONLY)
1460 seq_puts(s, "NBD_FLAG_READ_ONLY\n");
1461 if (flags & NBD_FLAG_SEND_FLUSH)
1462 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
Shaun McDowell685c9b22017-05-25 23:55:54 -04001463 if (flags & NBD_FLAG_SEND_FUA)
1464 seq_puts(s, "NBD_FLAG_SEND_FUA\n");
Markus Pargmann30d53d92015-08-17 08:20:06 +02001465 if (flags & NBD_FLAG_SEND_TRIM)
1466 seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
1467
1468 return 0;
1469}
1470
1471static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
1472{
1473 return single_open(file, nbd_dbg_flags_show, inode->i_private);
1474}
1475
1476static const struct file_operations nbd_dbg_flags_ops = {
1477 .open = nbd_dbg_flags_open,
1478 .read = seq_read,
1479 .llseek = seq_lseek,
1480 .release = single_release,
1481};
1482
1483static int nbd_dev_dbg_init(struct nbd_device *nbd)
1484{
1485 struct dentry *dir;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001486 struct nbd_config *config = nbd->config;
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001487
1488 if (!nbd_dbg_dir)
1489 return -EIO;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001490
1491 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001492 if (!dir) {
1493 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1494 nbd_name(nbd));
1495 return -EIO;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001496 }
Josef Bacik5ea8d102017-04-06 17:01:58 -04001497 config->dbg_dir = dir;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001498
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001499 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001500 debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
Josef Bacik0eadf372016-09-08 12:33:40 -07001501 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001502 debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
Josef Bacikd366a0f2016-06-08 10:32:10 -04001503 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
Markus Pargmann30d53d92015-08-17 08:20:06 +02001504
1505 return 0;
1506}
1507
1508static void nbd_dev_dbg_close(struct nbd_device *nbd)
1509{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001510 debugfs_remove_recursive(nbd->config->dbg_dir);
Markus Pargmann30d53d92015-08-17 08:20:06 +02001511}
1512
1513static int nbd_dbg_init(void)
1514{
1515 struct dentry *dbg_dir;
1516
1517 dbg_dir = debugfs_create_dir("nbd", NULL);
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001518 if (!dbg_dir)
1519 return -EIO;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001520
1521 nbd_dbg_dir = dbg_dir;
1522
1523 return 0;
1524}
1525
1526static void nbd_dbg_close(void)
1527{
1528 debugfs_remove_recursive(nbd_dbg_dir);
1529}
1530
1531#else /* IS_ENABLED(CONFIG_DEBUG_FS) */
1532
1533static int nbd_dev_dbg_init(struct nbd_device *nbd)
1534{
1535 return 0;
1536}
1537
1538static void nbd_dev_dbg_close(struct nbd_device *nbd)
1539{
1540}
1541
1542static int nbd_dbg_init(void)
1543{
1544 return 0;
1545}
1546
1547static void nbd_dbg_close(void)
1548{
1549}
1550
1551#endif
1552
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001553static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1554 unsigned int hctx_idx, unsigned int numa_node)
Josef Bacikfd8383f2016-09-08 12:33:37 -07001555{
1556 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001557 cmd->nbd = set->driver_data;
Josef Bacikd7d94d42018-07-16 12:11:34 -04001558 cmd->flags = 0;
Josef Bacik8f3ea352018-07-16 12:11:35 -04001559 mutex_init(&cmd->lock);
Josef Bacikfd8383f2016-09-08 12:33:37 -07001560 return 0;
1561}
1562
Eric Biggersf363b082017-03-30 13:39:16 -07001563static const struct blk_mq_ops nbd_mq_ops = {
Josef Bacikfd8383f2016-09-08 12:33:37 -07001564 .queue_rq = nbd_queue_rq,
Christoph Hellwig1e388ae2017-04-20 16:03:06 +02001565 .complete = nbd_complete_rq,
Josef Bacikfd8383f2016-09-08 12:33:37 -07001566 .init_request = nbd_init_request,
Josef Bacik0eadf372016-09-08 12:33:40 -07001567 .timeout = nbd_xmit_timeout,
Josef Bacikfd8383f2016-09-08 12:33:37 -07001568};
1569
Josef Bacikb0d91112017-02-01 16:11:40 -05001570static int nbd_dev_add(int index)
1571{
1572 struct nbd_device *nbd;
1573 struct gendisk *disk;
1574 struct request_queue *q;
1575 int err = -ENOMEM;
1576
1577 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1578 if (!nbd)
1579 goto out;
1580
1581 disk = alloc_disk(1 << part_shift);
1582 if (!disk)
1583 goto out_free_nbd;
1584
1585 if (index >= 0) {
1586 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1587 GFP_KERNEL);
1588 if (err == -ENOSPC)
1589 err = -EEXIST;
1590 } else {
1591 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
1592 if (err >= 0)
1593 index = err;
1594 }
1595 if (err < 0)
1596 goto out_free_disk;
1597
Josef Bacike46c7282017-04-06 17:02:00 -04001598 nbd->index = index;
Josef Bacikb0d91112017-02-01 16:11:40 -05001599 nbd->disk = disk;
1600 nbd->tag_set.ops = &nbd_mq_ops;
1601 nbd->tag_set.nr_hw_queues = 1;
1602 nbd->tag_set.queue_depth = 128;
1603 nbd->tag_set.numa_node = NUMA_NO_NODE;
1604 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
1605 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
Ming Lei56d18f62019-02-15 19:13:24 +08001606 BLK_MQ_F_BLOCKING;
Josef Bacikb0d91112017-02-01 16:11:40 -05001607 nbd->tag_set.driver_data = nbd;
1608
1609 err = blk_mq_alloc_tag_set(&nbd->tag_set);
1610 if (err)
1611 goto out_free_idr;
1612
1613 q = blk_mq_init_queue(&nbd->tag_set);
1614 if (IS_ERR(q)) {
1615 err = PTR_ERR(q);
1616 goto out_free_tags;
1617 }
1618 disk->queue = q;
1619
1620 /*
1621 * Tell the block layer that we are not a rotational device
1622 */
Bart Van Assche8b904b52018-03-07 17:10:10 -08001623 blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
1624 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
Josef Bacik6df133a2018-05-23 13:35:59 -04001625 disk->queue->limits.discard_granularity = 0;
Josef Bacik07ce2132018-06-05 11:41:23 -04001626 disk->queue->limits.discard_alignment = 0;
Josef Bacik6df133a2018-05-23 13:35:59 -04001627 blk_queue_max_discard_sectors(disk->queue, 0);
Josef Bacikebb16d02017-04-18 16:22:51 -04001628 blk_queue_max_segment_size(disk->queue, UINT_MAX);
Josef Bacik1cc1f172017-04-20 15:47:01 -04001629 blk_queue_max_segments(disk->queue, USHRT_MAX);
Josef Bacikb0d91112017-02-01 16:11:40 -05001630 blk_queue_max_hw_sectors(disk->queue, 65536);
1631 disk->queue->limits.max_sectors = 256;
1632
Josef Bacikb0d91112017-02-01 16:11:40 -05001633 mutex_init(&nbd->config_lock);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001634 refcount_set(&nbd->config_refs, 0);
Josef Bacikc6a47592017-04-06 17:02:06 -04001635 refcount_set(&nbd->refs, 1);
1636 INIT_LIST_HEAD(&nbd->list);
Josef Bacikb0d91112017-02-01 16:11:40 -05001637 disk->major = NBD_MAJOR;
1638 disk->first_minor = index << part_shift;
1639 disk->fops = &nbd_fops;
1640 disk->private_data = nbd;
1641 sprintf(disk->disk_name, "nbd%d", index);
Josef Bacikb0d91112017-02-01 16:11:40 -05001642 add_disk(disk);
Josef Bacik47d902b2017-04-06 17:02:05 -04001643 nbd_total_devices++;
Josef Bacikb0d91112017-02-01 16:11:40 -05001644 return index;
1645
1646out_free_tags:
1647 blk_mq_free_tag_set(&nbd->tag_set);
1648out_free_idr:
1649 idr_remove(&nbd_index_idr, index);
1650out_free_disk:
1651 put_disk(disk);
1652out_free_nbd:
1653 kfree(nbd);
1654out:
1655 return err;
1656}
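
/*
 * The queue limits set above allow 128 in-flight commands and cap
 * requests at 256 sectors (128 KiB) by default, while max_hw_sectors is
 * left at 65536 so max_sectors_kb can be raised via sysfs if the server
 * handles larger requests.
 */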
1657
Josef Bacike46c7282017-04-06 17:02:00 -04001658static int find_free_cb(int id, void *ptr, void *data)
1659{
1660 struct nbd_device *nbd = ptr;
1661 struct nbd_device **found = data;
1662
1663 if (!refcount_read(&nbd->config_refs)) {
1664 *found = nbd;
1665 return 1;
1666 }
1667 return 0;
1668}
1669
1670/* Netlink interface. */
Stephen Hemmingera86c4122018-07-18 09:32:43 -07001671static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
Josef Bacike46c7282017-04-06 17:02:00 -04001672 [NBD_ATTR_INDEX] = { .type = NLA_U32 },
1673 [NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
1674 [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
1675 [NBD_ATTR_TIMEOUT] = { .type = NLA_U64 },
1676 [NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 },
1677 [NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 },
1678 [NBD_ATTR_SOCKETS] = { .type = NLA_NESTED},
Josef Bacik560bc4b2017-04-06 17:02:04 -04001679 [NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 },
Josef Bacik47d902b2017-04-06 17:02:05 -04001680 [NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
Josef Bacike46c7282017-04-06 17:02:00 -04001681};
1682
Stephen Hemmingera86c4122018-07-18 09:32:43 -07001683static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
Josef Bacike46c7282017-04-06 17:02:00 -04001684 [NBD_SOCK_FD] = { .type = NLA_U32 },
1685};
1686
Josef Bacik47d902b2017-04-06 17:02:05 -04001687/* We don't use this right now since we don't parse the incoming list, but we
1688 * still want it here so userspace knows what to expect.
1689 */
Stephen Hemmingera86c4122018-07-18 09:32:43 -07001690static const struct nla_policy __attribute__((unused))
Josef Bacik47d902b2017-04-06 17:02:05 -04001691nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
1692 [NBD_DEVICE_INDEX] = { .type = NLA_U32 },
1693 [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
1694};
1695
Mike Christie4ddeaae82019-05-29 15:16:06 -05001696static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
1697{
1698 struct nbd_config *config = nbd->config;
1699 u64 bsize = config->blksize;
1700 u64 bytes = config->bytesize;
1701
1702 if (info->attrs[NBD_ATTR_SIZE_BYTES])
1703 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
1704
1705 if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
1706 bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
1707 if (!bsize)
1708 bsize = NBD_DEF_BLKSIZE;
1709 if (!nbd_is_valid_blksize(bsize)) {
1710 			printk(KERN_ERR "nbd: invalid block size %llu\n", bsize);
1711 return -EINVAL;
1712 }
1713 }
1714
1715 if (bytes != config->bytesize || bsize != config->blksize)
1716 nbd_size_set(nbd, bsize, div64_u64(bytes, bsize));
1717 return 0;
1718}
1719
Josef Bacike46c7282017-04-06 17:02:00 -04001720static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
1721{
1722 struct nbd_device *nbd = NULL;
1723 struct nbd_config *config;
1724 int index = -1;
1725 int ret;
Josef Bacika2c97902017-04-06 17:02:07 -04001726 bool put_dev = false;
Josef Bacike46c7282017-04-06 17:02:00 -04001727
1728 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1729 return -EPERM;
1730
1731 if (info->attrs[NBD_ATTR_INDEX])
1732 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1733 if (!info->attrs[NBD_ATTR_SOCKETS]) {
1734 printk(KERN_ERR "nbd: must specify at least one socket\n");
1735 return -EINVAL;
1736 }
1737 if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
1738 printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
1739 return -EINVAL;
1740 }
1741again:
1742 mutex_lock(&nbd_index_mutex);
1743 if (index == -1) {
1744 ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
1745 if (ret == 0) {
1746 int new_index;
1747 new_index = nbd_dev_add(-1);
1748 if (new_index < 0) {
1749 mutex_unlock(&nbd_index_mutex);
1750 printk(KERN_ERR "nbd: failed to add new device\n");
Gustavo A. R. Silva09799622018-02-12 11:14:55 -06001751 return new_index;
Josef Bacike46c7282017-04-06 17:02:00 -04001752 }
1753 nbd = idr_find(&nbd_index_idr, new_index);
1754 }
1755 } else {
1756 nbd = idr_find(&nbd_index_idr, index);
Josef Bacike6a76272017-08-14 18:25:33 +00001757 if (!nbd) {
1758 ret = nbd_dev_add(index);
1759 if (ret < 0) {
1760 mutex_unlock(&nbd_index_mutex);
1761 printk(KERN_ERR "nbd: failed to add new device\n");
1762 return ret;
1763 }
1764 nbd = idr_find(&nbd_index_idr, index);
1765 }
Josef Bacike46c7282017-04-06 17:02:00 -04001766 }
Josef Bacike46c7282017-04-06 17:02:00 -04001767 if (!nbd) {
1768 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1769 index);
Josef Bacikc6a47592017-04-06 17:02:06 -04001770 mutex_unlock(&nbd_index_mutex);
Josef Bacike46c7282017-04-06 17:02:00 -04001771 return -EINVAL;
1772 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001773 if (!refcount_inc_not_zero(&nbd->refs)) {
1774 mutex_unlock(&nbd_index_mutex);
1775 if (index == -1)
1776 goto again;
1777 printk(KERN_ERR "nbd: device at index %d is going down\n",
1778 index);
1779 return -EINVAL;
1780 }
1781 mutex_unlock(&nbd_index_mutex);
Josef Bacike46c7282017-04-06 17:02:00 -04001782
1783 mutex_lock(&nbd->config_lock);
1784 if (refcount_read(&nbd->config_refs)) {
1785 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001786 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001787 if (index == -1)
1788 goto again;
1789 printk(KERN_ERR "nbd: nbd%d already in use\n", index);
1790 return -EBUSY;
1791 }
1792 if (WARN_ON(nbd->config)) {
1793 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001794 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001795 return -EINVAL;
1796 }
1797 config = nbd->config = nbd_alloc_config();
1798 if (!nbd->config) {
1799 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001800 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001801 printk(KERN_ERR "nbd: couldn't allocate config\n");
1802 return -ENOMEM;
1803 }
1804 refcount_set(&nbd->config_refs, 1);
1805 set_bit(NBD_BOUND, &config->runtime_flags);
1806
Mike Christie4ddeaae82019-05-29 15:16:06 -05001807 ret = nbd_genl_size_set(info, nbd);
1808 if (ret)
1809 goto out;
1810
Mike Christie55313e92019-08-13 11:39:49 -05001811 if (info->attrs[NBD_ATTR_TIMEOUT])
1812 nbd_set_cmd_timeout(nbd,
1813 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
Josef Bacik560bc4b2017-04-06 17:02:04 -04001814 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
1815 config->dead_conn_timeout =
1816 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
1817 config->dead_conn_timeout *= HZ;
1818 }
Josef Bacike46c7282017-04-06 17:02:00 -04001819 if (info->attrs[NBD_ATTR_SERVER_FLAGS])
1820 config->flags =
1821 nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
Josef Bacika2c97902017-04-06 17:02:07 -04001822 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
1823 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
1824 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
1825 set_bit(NBD_DESTROY_ON_DISCONNECT,
1826 &config->runtime_flags);
1827 put_dev = true;
1828 }
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001829 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
1830 set_bit(NBD_DISCONNECT_ON_CLOSE,
1831 &config->runtime_flags);
1832 }
Josef Bacika2c97902017-04-06 17:02:07 -04001833 }
1834
Josef Bacike46c7282017-04-06 17:02:00 -04001835 if (info->attrs[NBD_ATTR_SOCKETS]) {
1836 struct nlattr *attr;
1837 int rem, fd;
1838
1839 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
1840 rem) {
1841 struct nlattr *socks[NBD_SOCK_MAX+1];
1842
1843 if (nla_type(attr) != NBD_SOCK_ITEM) {
1844 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
1845 ret = -EINVAL;
1846 goto out;
1847 }
Johannes Berg8cb08172019-04-26 14:07:28 +02001848 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
1849 attr,
1850 nbd_sock_policy,
1851 info->extack);
Josef Bacike46c7282017-04-06 17:02:00 -04001852 if (ret != 0) {
1853 printk(KERN_ERR "nbd: error processing sock list\n");
1854 ret = -EINVAL;
1855 goto out;
1856 }
1857 if (!socks[NBD_SOCK_FD])
1858 continue;
1859 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
1860 ret = nbd_add_socket(nbd, fd, true);
1861 if (ret)
1862 goto out;
1863 }
1864 }
1865 ret = nbd_start_device(nbd);
1866out:
1867 mutex_unlock(&nbd->config_lock);
1868 if (!ret) {
1869 set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags);
1870 refcount_inc(&nbd->config_refs);
1871 nbd_connect_reply(info, nbd->index);
1872 }
1873 nbd_config_put(nbd);
Josef Bacika2c97902017-04-06 17:02:07 -04001874 if (put_dev)
1875 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001876 return ret;
1877}
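
/*
 * A hypothetical libnl-3 sketch of the connect path (error handling
 * omitted; export_bytes and sock_fd are placeholders; attribute and
 * command names come from nbd-netlink.h):
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int fam = genl_ctrl_resolve(sk, NBD_GENL_FAMILY_NAME);
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
 *		    NBD_CMD_CONNECT, NBD_GENL_VERSION);
 *	nla_put_u64(msg, NBD_ATTR_SIZE_BYTES, export_bytes);
 *	struct nlattr *socks = nla_nest_start(msg, NBD_ATTR_SOCKETS);
 *	struct nlattr *item = nla_nest_start(msg, NBD_SOCK_ITEM);
 *	nla_put_u32(msg, NBD_SOCK_FD, sock_fd);
 *	nla_nest_end(msg, item);
 *	nla_nest_end(msg, socks);
 *	nl_send_auto(sk, msg);
 */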
1878
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001879static void nbd_disconnect_and_put(struct nbd_device *nbd)
1880{
1881 mutex_lock(&nbd->config_lock);
1882 nbd_disconnect(nbd);
1883 nbd_clear_sock(nbd);
1884 mutex_unlock(&nbd->config_lock);
1885 if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
1886 &nbd->config->runtime_flags))
1887 nbd_config_put(nbd);
1888}
1889
Josef Bacike46c7282017-04-06 17:02:00 -04001890static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
1891{
1892 struct nbd_device *nbd;
1893 int index;
1894
1895 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1896 return -EPERM;
1897
1898 if (!info->attrs[NBD_ATTR_INDEX]) {
1899 printk(KERN_ERR "nbd: must specify an index to disconnect\n");
1900 return -EINVAL;
1901 }
1902 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1903 mutex_lock(&nbd_index_mutex);
1904 nbd = idr_find(&nbd_index_idr, index);
Josef Bacike46c7282017-04-06 17:02:00 -04001905 if (!nbd) {
Josef Bacikc6a47592017-04-06 17:02:06 -04001906 mutex_unlock(&nbd_index_mutex);
Josef Bacike46c7282017-04-06 17:02:00 -04001907 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1908 index);
1909 return -EINVAL;
1910 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001911 if (!refcount_inc_not_zero(&nbd->refs)) {
1912 mutex_unlock(&nbd_index_mutex);
1913 printk(KERN_ERR "nbd: device at index %d is going down\n",
1914 index);
1915 return -EINVAL;
1916 }
1917 mutex_unlock(&nbd_index_mutex);
1918 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1919 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001920 return 0;
Josef Bacikc6a47592017-04-06 17:02:06 -04001921 }
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001922 nbd_disconnect_and_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001923 nbd_config_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04001924 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001925 return 0;
1926}
1927
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001928static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
1929{
1930 struct nbd_device *nbd = NULL;
1931 struct nbd_config *config;
1932 int index;
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001933 int ret = 0;
Josef Bacika2c97902017-04-06 17:02:07 -04001934 bool put_dev = false;
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001935
1936 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1937 return -EPERM;
1938
1939 if (!info->attrs[NBD_ATTR_INDEX]) {
1940 printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
1941 return -EINVAL;
1942 }
1943 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1944 mutex_lock(&nbd_index_mutex);
1945 nbd = idr_find(&nbd_index_idr, index);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001946 if (!nbd) {
Josef Bacikc6a47592017-04-06 17:02:06 -04001947 mutex_unlock(&nbd_index_mutex);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001948 printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
1949 index);
1950 return -EINVAL;
1951 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001952 if (!refcount_inc_not_zero(&nbd->refs)) {
1953 mutex_unlock(&nbd_index_mutex);
1954 printk(KERN_ERR "nbd: device at index %d is going down\n",
1955 index);
1956 return -EINVAL;
1957 }
1958 mutex_unlock(&nbd_index_mutex);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001959
1960 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1961 dev_err(nbd_to_dev(nbd),
1962 "not configured, cannot reconfigure\n");
Josef Bacikc6a47592017-04-06 17:02:06 -04001963 nbd_put(nbd);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001964 return -EINVAL;
1965 }
1966
1967 mutex_lock(&nbd->config_lock);
1968 config = nbd->config;
1969 if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
1970 !nbd->task_recv) {
1971 dev_err(nbd_to_dev(nbd),
1972 "not configured, cannot reconfigure\n");
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001973 ret = -EINVAL;
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001974 goto out;
1975 }
1976
Mike Christie4ddeaae82019-05-29 15:16:06 -05001977 ret = nbd_genl_size_set(info, nbd);
1978 if (ret)
1979 goto out;
1980
Mike Christie55313e92019-08-13 11:39:49 -05001981 if (info->attrs[NBD_ATTR_TIMEOUT])
1982 nbd_set_cmd_timeout(nbd,
1983 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
Josef Bacik560bc4b2017-04-06 17:02:04 -04001984 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
1985 config->dead_conn_timeout =
1986 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
1987 config->dead_conn_timeout *= HZ;
1988 }
Josef Bacika2c97902017-04-06 17:02:07 -04001989 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
1990 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
1991 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
1992 if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
1993 &config->runtime_flags))
1994 put_dev = true;
1995 } else {
1996 if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
1997 &config->runtime_flags))
1998 refcount_inc(&nbd->refs);
1999 }
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07002000
2001 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
2002 set_bit(NBD_DISCONNECT_ON_CLOSE,
2003 &config->runtime_flags);
2004 } else {
2005 clear_bit(NBD_DISCONNECT_ON_CLOSE,
2006 &config->runtime_flags);
2007 }
Josef Bacika2c97902017-04-06 17:02:07 -04002008 }
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002009
2010 if (info->attrs[NBD_ATTR_SOCKETS]) {
2011 struct nlattr *attr;
2012 int rem, fd;
2013
2014 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2015 rem) {
2016 struct nlattr *socks[NBD_SOCK_MAX+1];
2017
2018 if (nla_type(attr) != NBD_SOCK_ITEM) {
2019 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
2020 ret = -EINVAL;
2021 goto out;
2022 }
Johannes Berg8cb08172019-04-26 14:07:28 +02002023 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2024 attr,
2025 nbd_sock_policy,
2026 info->extack);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002027 if (ret != 0) {
2028 printk(KERN_ERR "nbd: error processing sock list\n");
2029 ret = -EINVAL;
2030 goto out;
2031 }
2032 if (!socks[NBD_SOCK_FD])
2033 continue;
2034 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2035 ret = nbd_reconnect_socket(nbd, fd);
2036 if (ret) {
2037 if (ret == -ENOSPC)
2038 ret = 0;
2039 goto out;
2040 }
2041 dev_info(nbd_to_dev(nbd), "reconnected socket\n");
2042 }
2043 }
2044out:
2045 mutex_unlock(&nbd->config_lock);
2046 nbd_config_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04002047 nbd_put(nbd);
Josef Bacika2c97902017-04-06 17:02:07 -04002048 if (put_dev)
2049 nbd_put(nbd);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002050 return ret;
2051}
2052
Josef Bacike46c7282017-04-06 17:02:00 -04002053static const struct genl_ops nbd_connect_genl_ops[] = {
2054 {
2055 .cmd = NBD_CMD_CONNECT,
Johannes Bergef6243a2019-04-26 14:07:31 +02002056 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
Josef Bacike46c7282017-04-06 17:02:00 -04002057 .doit = nbd_genl_connect,
2058 },
2059 {
2060 .cmd = NBD_CMD_DISCONNECT,
Johannes Bergef6243a2019-04-26 14:07:31 +02002061 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
Josef Bacike46c7282017-04-06 17:02:00 -04002062 .doit = nbd_genl_disconnect,
2063 },
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002064 {
2065 .cmd = NBD_CMD_RECONFIGURE,
Johannes Bergef6243a2019-04-26 14:07:31 +02002066 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002067 .doit = nbd_genl_reconfigure,
2068 },
Josef Bacik47d902b2017-04-06 17:02:05 -04002069 {
2070 .cmd = NBD_CMD_STATUS,
Johannes Bergef6243a2019-04-26 14:07:31 +02002071 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
Josef Bacik47d902b2017-04-06 17:02:05 -04002072 .doit = nbd_genl_status,
2073 },
Josef Bacike46c7282017-04-06 17:02:00 -04002074};
2075
Josef Bacik799f9a32017-04-06 17:02:02 -04002076static const struct genl_multicast_group nbd_mcast_grps[] = {
2077 { .name = NBD_GENL_MCAST_GROUP_NAME, },
2078};
2079
Josef Bacike46c7282017-04-06 17:02:00 -04002080static struct genl_family nbd_genl_family __ro_after_init = {
2081 .hdrsize = 0,
2082 .name = NBD_GENL_FAMILY_NAME,
2083 .version = NBD_GENL_VERSION,
2084 .module = THIS_MODULE,
2085 .ops = nbd_connect_genl_ops,
2086 .n_ops = ARRAY_SIZE(nbd_connect_genl_ops),
2087 .maxattr = NBD_ATTR_MAX,
Johannes Berg3b0f31f2019-03-21 22:51:02 +01002088 .policy = nbd_attr_policy,
Josef Bacik799f9a32017-04-06 17:02:02 -04002089 .mcgrps = nbd_mcast_grps,
2090 .n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
Josef Bacike46c7282017-04-06 17:02:00 -04002091};
2092
Josef Bacik47d902b2017-04-06 17:02:05 -04002093static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
2094{
2095 struct nlattr *dev_opt;
2096 u8 connected = 0;
2097 int ret;
2098
2099 	/* This is a little racy, but for status it's ok. We don't take a
2100 	 * ref here because in the index == -1 case we would have to drop
2101 	 * it under nbd_index_mutex, which can deadlock if the device is
2102 	 * configured to remove itself once disconnected.
2103 	 */
2106 if (refcount_read(&nbd->config_refs))
2107 connected = 1;
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002108 dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
Josef Bacik47d902b2017-04-06 17:02:05 -04002109 if (!dev_opt)
2110 return -EMSGSIZE;
2111 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
2112 if (ret)
2113 return -EMSGSIZE;
2114 ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
2115 connected);
2116 if (ret)
2117 return -EMSGSIZE;
2118 nla_nest_end(reply, dev_opt);
2119 return 0;
2120}
2121
2122static int status_cb(int id, void *ptr, void *data)
2123{
2124 struct nbd_device *nbd = ptr;
2125 return populate_nbd_status(nbd, (struct sk_buff *)data);
2126}
2127
2128static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
2129{
2130 struct nlattr *dev_list;
2131 struct sk_buff *reply;
2132 void *reply_head;
2133 size_t msg_size;
2134 int index = -1;
2135 int ret = -ENOMEM;
2136
2137 if (info->attrs[NBD_ATTR_INDEX])
2138 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2139
2140 mutex_lock(&nbd_index_mutex);
2141
2142 msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
2143 nla_attr_size(sizeof(u8)));
2144 msg_size *= (index == -1) ? nbd_total_devices : 1;
2145
2146 reply = genlmsg_new(msg_size, GFP_KERNEL);
2147 if (!reply)
2148 goto out;
2149 reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
2150 NBD_CMD_STATUS);
2151 if (!reply_head) {
2152 nlmsg_free(reply);
2153 goto out;
2154 }
2155
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002156 dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
Josef Bacik47d902b2017-04-06 17:02:05 -04002157 if (index == -1) {
2158 ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
2159 if (ret) {
2160 nlmsg_free(reply);
2161 goto out;
2162 }
2163 } else {
2164 struct nbd_device *nbd;
2165 nbd = idr_find(&nbd_index_idr, index);
2166 if (nbd) {
2167 ret = populate_nbd_status(nbd, reply);
2168 if (ret) {
2169 nlmsg_free(reply);
2170 goto out;
2171 }
2172 }
2173 }
2174 nla_nest_end(reply, dev_list);
2175 genlmsg_end(reply, reply_head);
Li RongQingcd46eb82019-02-19 13:14:07 +08002176 ret = genlmsg_reply(reply, info);
Josef Bacik47d902b2017-04-06 17:02:05 -04002177out:
2178 mutex_unlock(&nbd_index_mutex);
2179 return ret;
2180}
2181
Josef Bacike46c7282017-04-06 17:02:00 -04002182static void nbd_connect_reply(struct genl_info *info, int index)
2183{
2184 struct sk_buff *skb;
2185 void *msg_head;
2186 int ret;
2187
2188 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2189 if (!skb)
2190 return;
2191 msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
2192 NBD_CMD_CONNECT);
2193 if (!msg_head) {
2194 nlmsg_free(skb);
2195 return;
2196 }
2197 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2198 if (ret) {
2199 nlmsg_free(skb);
2200 return;
2201 }
2202 genlmsg_end(skb, msg_head);
2203 genlmsg_reply(skb, info);
2204}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
Josef Bacik799f9a32017-04-06 17:02:02 -04002206static void nbd_mcast_index(int index)
2207{
2208 struct sk_buff *skb;
2209 void *msg_head;
2210 int ret;
2211
2212 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2213 if (!skb)
2214 return;
2215 msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
2216 NBD_CMD_LINK_DEAD);
2217 if (!msg_head) {
2218 nlmsg_free(skb);
2219 return;
2220 }
2221 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2222 if (ret) {
2223 nlmsg_free(skb);
2224 return;
2225 }
2226 genlmsg_end(skb, msg_head);
2227 genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
2228}
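
/*
 * The NBD_CMD_LINK_DEAD event goes out on the NBD_GENL_MCAST_GROUP_NAME
 * multicast group; a userspace supervisor can subscribe to it and push a
 * replacement socket back in with NBD_CMD_RECONFIGURE.
 */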
2229
2230static void nbd_dead_link_work(struct work_struct *work)
2231{
2232 struct link_dead_args *args = container_of(work, struct link_dead_args,
2233 work);
2234 nbd_mcast_index(args->index);
2235 kfree(args);
2236}
2237
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238static int __init nbd_init(void)
2239{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 int i;
2241
Adrian Bunk5b7b18c2006-03-25 03:07:04 -08002242 BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002244 if (max_part < 0) {
WANG Cong7742ce42011-08-19 14:48:28 +02002245 printk(KERN_ERR "nbd: max_part must be >= 0\n");
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002246 return -EINVAL;
2247 }
2248
2249 part_shift = 0;
Namhyung Kim5988ce22011-05-28 14:44:46 +02002250 if (max_part > 0) {
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002251 part_shift = fls(max_part);
2252
Namhyung Kim5988ce22011-05-28 14:44:46 +02002253 /*
2254 * Adjust max_part according to part_shift as it is exported
2255 * to user space so that user can know the max number of
2256 * partition kernel should be able to manage.
2257 *
2258 * Note that -1 is required because partition 0 is reserved
2259 * for the whole disk.
2260 */
2261 max_part = (1UL << part_shift) - 1;
2262 }
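
	/*
	 * Worked example: the default max_part=16 gives fls(16) = 5, so
	 * part_shift = 5 and max_part is adjusted to (1 << 5) - 1 = 31;
	 * each device then spans 32 minors (whole disk plus 31 partitions).
	 */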
2263
Namhyung Kim3b271082011-05-28 14:44:46 +02002264 if ((1UL << part_shift) > DISK_MAX_PARTS)
2265 return -EINVAL;
2266
2267 if (nbds_max > 1UL << (MINORBITS - part_shift))
2268 return -EINVAL;
Josef Bacik124d6db2017-02-01 16:11:11 -05002269 recv_workqueue = alloc_workqueue("knbd-recv",
Dan Melnic2189c972017-09-18 13:08:51 -07002270 WQ_MEM_RECLAIM | WQ_HIGHPRI |
2271 WQ_UNBOUND, 0);
Josef Bacik124d6db2017-02-01 16:11:11 -05002272 if (!recv_workqueue)
2273 return -ENOMEM;
Namhyung Kim3b271082011-05-28 14:44:46 +02002274
Josef Bacik6330a2d2017-02-15 16:49:48 -05002275 if (register_blkdev(NBD_MAJOR, "nbd")) {
2276 destroy_workqueue(recv_workqueue);
Josef Bacikb0d91112017-02-01 16:11:40 -05002277 return -EIO;
Josef Bacik6330a2d2017-02-15 16:49:48 -05002278 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279
Josef Bacike46c7282017-04-06 17:02:00 -04002280 if (genl_register_family(&nbd_genl_family)) {
2281 unregister_blkdev(NBD_MAJOR, "nbd");
2282 destroy_workqueue(recv_workqueue);
2283 return -EINVAL;
2284 }
Markus Pargmann30d53d92015-08-17 08:20:06 +02002285 nbd_dbg_init();
2286
Josef Bacikb0d91112017-02-01 16:11:40 -05002287 mutex_lock(&nbd_index_mutex);
2288 for (i = 0; i < nbds_max; i++)
2289 nbd_dev_add(i);
2290 mutex_unlock(&nbd_index_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 return 0;
Josef Bacikb0d91112017-02-01 16:11:40 -05002292}
2293
2294static int nbd_exit_cb(int id, void *ptr, void *data)
2295{
Josef Bacikc6a47592017-04-06 17:02:06 -04002296 struct list_head *list = (struct list_head *)data;
Josef Bacikb0d91112017-02-01 16:11:40 -05002297 struct nbd_device *nbd = ptr;
Josef Bacikc6a47592017-04-06 17:02:06 -04002298
Josef Bacikc6a47592017-04-06 17:02:06 -04002299 list_add_tail(&nbd->list, list);
Josef Bacikb0d91112017-02-01 16:11:40 -05002300 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301}
2302
2303static void __exit nbd_cleanup(void)
2304{
Josef Bacikc6a47592017-04-06 17:02:06 -04002305 struct nbd_device *nbd;
2306 LIST_HEAD(del_list);
2307
Markus Pargmann30d53d92015-08-17 08:20:06 +02002308 nbd_dbg_close();
2309
Josef Bacikc6a47592017-04-06 17:02:06 -04002310 mutex_lock(&nbd_index_mutex);
2311 idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
2312 mutex_unlock(&nbd_index_mutex);
2313
Josef Bacik60ae36a2017-04-28 09:49:19 -04002314 while (!list_empty(&del_list)) {
2315 nbd = list_first_entry(&del_list, struct nbd_device, list);
2316 list_del_init(&nbd->list);
2317 if (refcount_read(&nbd->refs) != 1)
Josef Bacikc6a47592017-04-06 17:02:06 -04002318 printk(KERN_ERR "nbd: possibly leaking a device\n");
2319 nbd_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04002320 }
2321
Josef Bacikb0d91112017-02-01 16:11:40 -05002322 idr_destroy(&nbd_index_idr);
Josef Bacike46c7282017-04-06 17:02:00 -04002323 genl_unregister_family(&nbd_genl_family);
Josef Bacik124d6db2017-02-01 16:11:11 -05002324 destroy_workqueue(recv_workqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325 unregister_blkdev(NBD_MAJOR, "nbd");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326}
2327
2328module_init(nbd_init);
2329module_exit(nbd_cleanup);
2330
2331MODULE_DESCRIPTION("Network Block Device");
2332MODULE_LICENSE("GPL");
2333
Lars Marowsky-Bree40be0c22005-05-01 08:59:07 -07002334module_param(nbds_max, int, 0444);
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002335MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
2336module_param(max_part, int, 0444);
Josef Bacik7a8362a2017-08-14 18:56:16 +00002337MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");