/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

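/* Bit numbers for nbd_config->runtime_flags (used with set_bit()/test_bit()). */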
#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_HAS_PID_FILE		3
#define NBD_HAS_CONFIG_REF		4
#define NBD_BOUND			5
#define NBD_DESTROY_ON_DISCONNECT	6
#define NBD_DISCONNECT_ON_CLOSE		7

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;
};

#define NBD_CMD_REQUEUED	1

struct nbd_cmd {
	struct nbd_device *nbd;
	struct mutex lock;
	int index;
	int cookie;
	blk_status_t status;
	unsigned long flags;
	u32 cmd_cookie;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part = 16;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
		blk_mq_requeue_request(req, true);
}

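/*
 * Wire handle layout, as built by nbd_cmd_handle() below: the low 32 bits
 * carry blk_mq_unique_tag(req) so a reply can be matched back to its
 * request, and the high 32 bits carry cmd->cmd_cookie so nbd_read_stat()
 * can reject replies that belong to an earlier send of the same tag
 * (e.g. after a timeout and requeue).
 */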
#define NBD_COOKIE_BITS 32

static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	u32 tag = blk_mq_unique_tag(req);
	u64 cookie = cmd->cmd_cookie;

	return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
	return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> NBD_COOKIE_BITS);
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = 0444},
	.show = pid_show,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	struct request_queue *q;

	if (disk) {
		q = disk->queue;
		del_gendisk(disk);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&nbd->tag_set);
		disk->private_data = NULL;
		put_disk(disk);
	}
	kfree(nbd);
}

static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		mutex_unlock(&nbd_index_mutex);
		nbd_dev_remove(nbd);
	}
}

static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
}

static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		if (atomic_dec_return(&nbd->config->live_connections) == 0) {
			if (test_and_clear_bit(NBD_DISCONNECT_REQUESTED,
					       &nbd->config->runtime_flags)) {
				set_bit(NBD_DISCONNECTED,
					&nbd->config->runtime_flags);
				dev_info(nbd_to_dev(nbd),
					 "Disconnected due to user request.\n");
			}
		}
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}

static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}

static void nbd_size_update(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct block_device *bdev = bdget_disk(nbd->disk, 0);

	if (config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = config->blksize;
		nbd->disk->queue->limits.discard_alignment = config->blksize;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
	}
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, config->bytesize >> 9);
	if (bdev) {
		if (bdev->bd_disk)
			bd_set_size(bdev, config->bytesize);
		else
			bdev->bd_invalidated = 1;
		bdput(bdev);
	}
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	if (nbd->task_recv != NULL)
		nbd_size_update(nbd);
}

static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		goto done;
	}
	config = nbd->config;

	if (!mutex_trylock(&cmd->lock))
		return BLK_EH_RESET_TIMER;

	if (config->num_connections > 1) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying (%d/%d alive)\n",
				    atomic_read(&config->live_connections),
				    config->num_connections);
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection.
		 */
		if (config->socks && config->num_connections > 1) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			mutex_unlock(&cmd->lock);
			nbd_requeue_cmd(cmd);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
	} else {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out\n");
	}
	set_bit(NBD_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);
	sock_shutdown(nbd);
	nbd_config_put(nbd);
done:
	blk_mq_complete_request(req);
	return BLK_EH_DONE;
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted %s on closed socket in sock_xmit\n",
				    (send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}

/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u64 handle;
	u32 type;
	u32 nbd_cmd_flags = 0;
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);
			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	} else {
		cmd->cmd_cookie++;
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	handle = nbd_cmd_handle(cmd);
	memcpy(request.handle, &handle, sizeof(handle));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	if (result <= 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}

/* NULL returned = something went wrong, inform userspace */
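/*
 * Reply handling, as implemented below: read one struct nbd_reply, map
 * reply.handle back to the request via the blk-mq tag in its low 32 bits,
 * and drop it if the cookie in the high 32 bits no longer matches the
 * command (a stale reply racing with a timeout/requeue). For reads, the
 * payload is then received directly into the request's bvecs.
 */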
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u64 handle;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;
	int ret = 0;

	reply.magic = 0;
	iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&handle, reply.handle, sizeof(handle));
	tag = nbd_handle_to_tag(handle);
	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
		ret = -ENOENT;
		goto out;
	}
	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		goto out;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected or we only have 1
				 * connection then we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config) ||
				    config->num_connections <= 1) {
					cmd->status = BLK_STS_IOERR;
					goto out;
				}
				ret = -EIO;
				goto out;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
out:
	mutex_unlock(&cmd->lock);
	return ret ? ERR_PTR(ret) : cmd;
}

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
	}
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	nbd_config_put(nbd);
	kfree(args);
}

static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	cmd->status = BLK_STS_IOERR;
	blk_mq_complete_request(req);
	return true;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}

static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return 0;
	return wait_event_timeout(config->conn_wait,
				  atomic_read(&config->live_connections) > 0,
				  config->dead_conn_timeout) > 0;
}

Laurent Vivier48cf6062008-04-29 01:02:46 -0700810{
Josef Bacikfd8383f2016-09-08 12:33:37 -0700811 struct request *req = blk_mq_rq_from_pdu(cmd);
812 struct nbd_device *nbd = cmd->nbd;
Josef Bacik5ea8d102017-04-06 17:01:58 -0400813 struct nbd_config *config;
Josef Bacik9561a7a2016-11-22 14:04:40 -0500814 struct nbd_sock *nsock;
Josef Bacik9dd5d3a2017-03-24 14:08:26 -0400815 int ret;
Josef Bacikfd8383f2016-09-08 12:33:37 -0700816
Josef Bacik5ea8d102017-04-06 17:01:58 -0400817 if (!refcount_inc_not_zero(&nbd->config_refs)) {
818 dev_err_ratelimited(disk_to_dev(nbd->disk),
819 "Socks array is empty\n");
Josef Bacik6a468d52017-11-06 16:11:58 -0500820 blk_mq_start_request(req);
Josef Bacik5ea8d102017-04-06 17:01:58 -0400821 return -EINVAL;
822 }
823 config = nbd->config;
824
825 if (index >= config->num_connections) {
Josef Bacika897b662016-12-05 16:20:29 -0500826 dev_err_ratelimited(disk_to_dev(nbd->disk),
827 "Attempted send on invalid socket\n");
Josef Bacik5ea8d102017-04-06 17:01:58 -0400828 nbd_config_put(nbd);
Josef Bacik6a468d52017-11-06 16:11:58 -0500829 blk_mq_start_request(req);
Josef Bacik9dd5d3a2017-03-24 14:08:26 -0400830 return -EINVAL;
Josef Bacik9561a7a2016-11-22 14:04:40 -0500831 }
Christoph Hellwig2a842ac2017-06-03 09:38:04 +0200832 cmd->status = BLK_STS_OK;
Josef Bacikf3733242017-04-06 17:01:57 -0400833again:
Josef Bacik5ea8d102017-04-06 17:01:58 -0400834 nsock = config->socks[index];
Josef Bacik9561a7a2016-11-22 14:04:40 -0500835 mutex_lock(&nsock->tx_lock);
Josef Bacikf3733242017-04-06 17:01:57 -0400836 if (nsock->dead) {
Josef Bacik560bc4b2017-04-06 17:02:04 -0400837 int old_index = index;
Josef Bacikf3733242017-04-06 17:01:57 -0400838 index = find_fallback(nbd, index);
Josef Bacik9561a7a2016-11-22 14:04:40 -0500839 mutex_unlock(&nsock->tx_lock);
Josef Bacik560bc4b2017-04-06 17:02:04 -0400840 if (index < 0) {
841 if (wait_for_reconnect(nbd)) {
842 index = old_index;
843 goto again;
844 }
845 /* All the sockets should already be down at this point,
846 * we just want to make sure that DISCONNECTED is set so
847 * any requests that come in that were queue'ed waiting
848 * for the reconnect timer don't trigger the timer again
849 * and instead just error out.
850 */
851 sock_shutdown(nbd);
852 nbd_config_put(nbd);
Josef Bacik6a468d52017-11-06 16:11:58 -0500853 blk_mq_start_request(req);
Josef Bacik560bc4b2017-04-06 17:02:04 -0400854 return -EIO;
855 }
Josef Bacikf3733242017-04-06 17:01:57 -0400856 goto again;
Laurent Vivier48cf6062008-04-29 01:02:46 -0700857 }
858
Josef Bacik9dd5d3a2017-03-24 14:08:26 -0400859 /* Handle the case that we have a pending request that was partially
860 * transmitted that _has_ to be serviced first. We need to call requeue
861 * here so that it gets put _after_ the request that is already on the
862 * dispatch list.
863 */
Josef Bacik6a468d52017-11-06 16:11:58 -0500864 blk_mq_start_request(req);
Josef Bacik9dd5d3a2017-03-24 14:08:26 -0400865 if (unlikely(nsock->pending && nsock->pending != req)) {
Josef Bacikd7d94d42018-07-16 12:11:34 -0400866 nbd_requeue_cmd(cmd);
Josef Bacik9dd5d3a2017-03-24 14:08:26 -0400867 ret = 0;
868 goto out;
Laurent Vivier48cf6062008-04-29 01:02:46 -0700869 }
Josef Bacikf3733242017-04-06 17:01:57 -0400870 /*
871 * Some failures are related to the link going down, so anything that
872 * returns EAGAIN can be retried on a different socket.
873 */
Josef Bacik9dd5d3a2017-03-24 14:08:26 -0400874 ret = nbd_send_cmd(nbd, cmd, index);
Josef Bacikf3733242017-04-06 17:01:57 -0400875 if (ret == -EAGAIN) {
876 dev_err_ratelimited(disk_to_dev(nbd->disk),
Josef Bacik6a468d52017-11-06 16:11:58 -0500877 "Request send failed, requeueing\n");
Josef Bacik799f9a32017-04-06 17:02:02 -0400878 nbd_mark_nsock_dead(nbd, nsock, 1);
Josef Bacikd7d94d42018-07-16 12:11:34 -0400879 nbd_requeue_cmd(cmd);
Josef Bacik6a468d52017-11-06 16:11:58 -0500880 ret = 0;
Josef Bacikf3733242017-04-06 17:01:57 -0400881 }
Josef Bacik9dd5d3a2017-03-24 14:08:26 -0400882out:
Josef Bacik9561a7a2016-11-22 14:04:40 -0500883 mutex_unlock(&nsock->tx_lock);
Josef Bacik5ea8d102017-04-06 17:01:58 -0400884 nbd_config_put(nbd);
Josef Bacik9dd5d3a2017-03-24 14:08:26 -0400885 return ret;
Laurent Vivier48cf6062008-04-29 01:02:46 -0700886}
887
Christoph Hellwigfc17b652017-06-03 09:38:05 +0200888static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
Josef Bacikfd8383f2016-09-08 12:33:37 -0700889 const struct blk_mq_queue_data *bd)
Laurent Vivier48cf6062008-04-29 01:02:46 -0700890{
Josef Bacikfd8383f2016-09-08 12:33:37 -0700891 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
Josef Bacik9dd5d3a2017-03-24 14:08:26 -0400892 int ret;
Laurent Vivier48cf6062008-04-29 01:02:46 -0700893
Josef Bacik9561a7a2016-11-22 14:04:40 -0500894 /*
895 * Since we look at the bio's to send the request over the network we
896 * need to make sure the completion work doesn't mark this request done
897 * before we are done doing our send. This keeps us from dereferencing
898 * freed data if we have particularly fast completions (ie we get the
899 * completion before we exit sock_xmit on the last bvec) or in the case
900 * that the server is misbehaving (or there was an error) before we're
901 * done sending everything over the wire.
902 */
Josef Bacik8f3ea352018-07-16 12:11:35 -0400903 mutex_lock(&cmd->lock);
Josef Bacikd7d94d42018-07-16 12:11:34 -0400904 clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
Josef Bacik9dd5d3a2017-03-24 14:08:26 -0400905
906 /* We can be called directly from the user space process, which means we
907 * could possibly have signals pending so our sendmsg will fail. In
908 * this case we need to return that we are busy, otherwise error out as
909 * appropriate.
910 */
911 ret = nbd_handle_cmd(cmd, hctx->queue_num);
Josef Bacik6e60a3b2017-10-02 16:22:08 -0400912 if (ret < 0)
913 ret = BLK_STS_IOERR;
914 else if (!ret)
915 ret = BLK_STS_OK;
Josef Bacik8f3ea352018-07-16 12:11:35 -0400916 mutex_unlock(&cmd->lock);
Josef Bacik9561a7a2016-11-22 14:04:40 -0500917
Josef Bacik6e60a3b2017-10-02 16:22:08 -0400918 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700919}
920
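/*
 * Attach one more connection to the device: look up the socket from the
 * passed-in fd, grow config->socks by one and record it. On the legacy
 * (non-netlink) path the configuring task is remembered in nbd->task_setup
 * so another task cannot mix its sockets into this setup.
 */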
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		sockfd_put(sock);
		return -EBUSY;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		sockfd_put(sock);
		return -ENOMEM;
	}
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);

	return 0;
}

static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_mutex in an error path in the recv_work, so we
		 * need to queue_work outside of the tx_mutex.
		 */
		queue_work(recv_workqueue, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_size(bdev, 0);
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	}
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}

static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
	send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}

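/*
 * Drop a reference on nbd->config; the final put tears the device down:
 * debugfs entries, the sysfs "pid" file, every socket, and the discard
 * settings, then frees the config and releases the module and nbd refs.
 */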
Josef Bacik5ea8d102017-04-06 17:01:58 -04001102static void nbd_config_put(struct nbd_device *nbd)
1103{
1104 if (refcount_dec_and_mutex_lock(&nbd->config_refs,
1105 &nbd->config_lock)) {
Josef Bacik5ea8d102017-04-06 17:01:58 -04001106 struct nbd_config *config = nbd->config;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001107 nbd_dev_dbg_close(nbd);
Josef Bacik29eaadc2017-04-06 17:01:59 -04001108 nbd_size_clear(nbd);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001109 if (test_and_clear_bit(NBD_HAS_PID_FILE,
1110 &config->runtime_flags))
1111 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
1112 nbd->task_recv = NULL;
Josef Bacik29eaadc2017-04-06 17:01:59 -04001113 nbd_clear_sock(nbd);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001114 if (config->num_connections) {
1115 int i;
1116 for (i = 0; i < config->num_connections; i++) {
1117 sockfd_put(config->socks[i]->sock);
1118 kfree(config->socks[i]);
1119 }
1120 kfree(config->socks);
1121 }
Ilya Dryomovfa976532017-05-23 17:49:55 +02001122 kfree(nbd->config);
Ilya Dryomovaf622b82017-05-23 17:49:54 +02001123 nbd->config = NULL;
1124
1125 nbd->tag_set.timeout = 0;
Josef Bacik6df133a2018-05-23 13:35:59 -04001126 nbd->disk->queue->limits.discard_granularity = 0;
Josef Bacik07ce2132018-06-05 11:41:23 -04001127 nbd->disk->queue->limits.discard_alignment = 0;
Josef Bacik6df133a2018-05-23 13:35:59 -04001128 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
Bart Van Assche8b904b52018-03-07 17:10:10 -08001129 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
Josef Bacika2c97902017-04-06 17:02:07 -04001130
Josef Bacik5ea8d102017-04-06 17:01:58 -04001131 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001132 nbd_put(nbd);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001133 module_put(THIS_MODULE);
1134 }
1135}
1136
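/*
 * Bring the device up once sockets have been added: sanity-check the
 * multi-connection setup, create the recv pid file, and queue one receive
 * work item per connection.  Called with config_lock held.
 */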
Josef Bacike46c7282017-04-06 17:02:00 -04001137static int nbd_start_device(struct nbd_device *nbd)
Josef Bacik9442b732017-02-07 17:10:22 -05001138{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001139 struct nbd_config *config = nbd->config;
1140 int num_connections = config->num_connections;
Josef Bacik9442b732017-02-07 17:10:22 -05001141 int error = 0, i;
1142
1143 if (nbd->task_recv)
1144 return -EBUSY;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001145 if (!config->socks)
Josef Bacik9442b732017-02-07 17:10:22 -05001146 return -EINVAL;
1147 if (num_connections > 1 &&
Josef Bacik5ea8d102017-04-06 17:01:58 -04001148 !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
Josef Bacik9442b732017-02-07 17:10:22 -05001149 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
Josef Bacik5ea8d102017-04-06 17:01:58 -04001150 return -EINVAL;
Josef Bacik9442b732017-02-07 17:10:22 -05001151 }
1152
Josef Bacik5ea8d102017-04-06 17:01:58 -04001153 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
Josef Bacik9442b732017-02-07 17:10:22 -05001154 nbd->task_recv = current;
Josef Bacik9442b732017-02-07 17:10:22 -05001155
Josef Bacik29eaadc2017-04-06 17:01:59 -04001156 nbd_parse_flags(nbd);
Josef Bacik9442b732017-02-07 17:10:22 -05001157
1158 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
1159 if (error) {
1160 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
Josef Bacik5ea8d102017-04-06 17:01:58 -04001161 return error;
Josef Bacik9442b732017-02-07 17:10:22 -05001162 }
Josef Bacik29eaadc2017-04-06 17:01:59 -04001163 set_bit(NBD_HAS_PID_FILE, &config->runtime_flags);
Josef Bacik9442b732017-02-07 17:10:22 -05001164
1165 nbd_dev_dbg_init(nbd);
1166 for (i = 0; i < num_connections; i++) {
Josef Bacik5ea8d102017-04-06 17:01:58 -04001167 struct recv_thread_args *args;
1168
1169 args = kzalloc(sizeof(*args), GFP_KERNEL);
1170 if (!args) {
1171 sock_shutdown(nbd);
1172 return -ENOMEM;
1173 }
1174 sk_set_memalloc(config->socks[i]->sock->sk);
Josef Bacika7ee8cf2017-07-21 10:48:15 -04001175 if (nbd->tag_set.timeout)
1176 config->socks[i]->sock->sk->sk_sndtimeo =
1177 nbd->tag_set.timeout;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001178 atomic_inc(&config->recv_threads);
1179 refcount_inc(&nbd->config_refs);
1180 INIT_WORK(&args->work, recv_work);
1181 args->nbd = nbd;
1182 args->index = i;
1183 queue_work(recv_workqueue, &args->work);
Josef Bacik9442b732017-02-07 17:10:22 -05001184 }
Josef Bacik639812a2017-10-09 13:12:10 -04001185 nbd_size_update(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001186 return error;
1187}
1188
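/*
 * ioctl(NBD_DO_IT) path: start the device and then sleep until all receive
 * workers have exited (i.e. until disconnect or a fatal socket error).  The
 * config_lock is dropped across the wait so other ioctls can still run.
 */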
1189static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
1190{
1191 struct nbd_config *config = nbd->config;
1192 int ret;
1193
1194 ret = nbd_start_device(nbd);
1195 if (ret)
1196 return ret;
1197
Josef Bacike46c7282017-04-06 17:02:00 -04001198 if (max_part)
1199 bdev->bd_invalidated = 1;
1200 mutex_unlock(&nbd->config_lock);
1201 ret = wait_event_interruptible(config->recv_wq,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001202 atomic_read(&config->recv_threads) == 0);
Josef Bacike46c7282017-04-06 17:02:00 -04001203 if (ret)
Josef Bacik5ea8d102017-04-06 17:01:58 -04001204 sock_shutdown(nbd);
Josef Bacik9442b732017-02-07 17:10:22 -05001205 mutex_lock(&nbd->config_lock);
Josef Bacik76aa1d32018-05-16 14:51:22 -04001206 nbd_bdev_reset(bdev);
Josef Bacik9442b732017-02-07 17:10:22 -05001207 /* user requested, ignore socket errors */
Josef Bacik5ea8d102017-04-06 17:01:58 -04001208 if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
Josef Bacike46c7282017-04-06 17:02:00 -04001209 ret = 0;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001210 if (test_bit(NBD_TIMEDOUT, &config->runtime_flags))
Josef Bacike46c7282017-04-06 17:02:00 -04001211 ret = -ETIMEDOUT;
1212 return ret;
Josef Bacik9442b732017-02-07 17:10:22 -05001213}
Markus Pargmann30d53d92015-08-17 08:20:06 +02001214
Josef Bacik29eaadc2017-04-06 17:01:59 -04001215static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
1216 struct block_device *bdev)
1217{
Josef Bacik2516ab12017-04-06 17:02:03 -04001218 sock_shutdown(nbd);
Josef Bacik29eaadc2017-04-06 17:01:59 -04001219 kill_bdev(bdev);
1220 nbd_bdev_reset(bdev);
Josef Bacike46c7282017-04-06 17:02:00 -04001221 if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
1222 &nbd->config->runtime_flags))
1223 nbd_config_put(nbd);
Josef Bacik29eaadc2017-04-06 17:01:59 -04001224}
1225
Josef Bacik9561a7a2016-11-22 14:04:40 -05001226/* Must be called with config_lock held */
Wanlong Gaof4507162012-03-28 14:42:51 -07001227static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
Pavel Machek1a2ad212009-04-02 16:58:41 -07001228 unsigned int cmd, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001230 struct nbd_config *config = nbd->config;
1231
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232 switch (cmd) {
Josef Bacik9442b732017-02-07 17:10:22 -05001233 case NBD_DISCONNECT:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001234 return nbd_disconnect(nbd);
Markus Pargmann23272a672015-10-29 11:51:16 +01001235 case NBD_CLEAR_SOCK:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001236 nbd_clear_sock_ioctl(nbd, bdev);
1237 return 0;
Josef Bacik9442b732017-02-07 17:10:22 -05001238 case NBD_SET_SOCK:
Josef Bacike46c7282017-04-06 17:02:00 -04001239 return nbd_add_socket(nbd, arg, false);
Josef Bacik9442b732017-02-07 17:10:22 -05001240 case NBD_SET_BLKSIZE:
Jens Axboebc811f02018-09-04 11:52:34 -06001241 if (!arg || !is_power_of_2(arg) || arg < 512 ||
1242 arg > PAGE_SIZE)
1243 return -EINVAL;
Josef Bacik29eaadc2017-04-06 17:01:59 -04001244 nbd_size_set(nbd, arg,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001245 div_s64(config->bytesize, arg));
Josef Bacike5445412017-02-13 10:39:47 -05001246 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247 case NBD_SET_SIZE:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001248 nbd_size_set(nbd, config->blksize,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001249 div_s64(arg, config->blksize));
Josef Bacike5445412017-02-13 10:39:47 -05001250 return 0;
Markus Pargmann37091fd2015-07-27 07:36:49 +02001251 case NBD_SET_SIZE_BLOCKS:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001252 nbd_size_set(nbd, config->blksize, arg);
Josef Bacike5445412017-02-13 10:39:47 -05001253 return 0;
Paul Clements7fdfd402007-10-16 23:27:37 -07001254 case NBD_SET_TIMEOUT:
Josef Bacikf8586852017-03-24 14:08:28 -04001255 if (arg) {
1256 nbd->tag_set.timeout = arg * HZ;
1257 blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
1258 }
Paul Clements7fdfd402007-10-16 23:27:37 -07001259 return 0;
Pavel Machek1a2ad212009-04-02 16:58:41 -07001260
Paul Clements2f012502012-10-04 17:16:15 -07001261 case NBD_SET_FLAGS:
Josef Bacik5ea8d102017-04-06 17:01:58 -04001262 config->flags = arg;
Paul Clements2f012502012-10-04 17:16:15 -07001263 return 0;
Josef Bacik9442b732017-02-07 17:10:22 -05001264 case NBD_DO_IT:
Josef Bacike46c7282017-04-06 17:02:00 -04001265 return nbd_start_device_ioctl(nbd, bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266 case NBD_CLEAR_QUE:
Herbert Xu4b2f0262006-01-06 00:09:47 -08001267 /*
1268 * This is for compatibility only. The queue is always cleared
1269 * by NBD_DO_IT or NBD_CLEAR_SOCK.
1270 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 return 0;
1272 case NBD_PRINT_DEBUG:
Josef Bacikfd8383f2016-09-08 12:33:37 -07001273 /*
1274 * For compatibility only, we no longer keep a list of
1275 * outstanding requests.
1276 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277 return 0;
1278 }
Pavel Machek1a2ad212009-04-02 16:58:41 -07001279 return -ENOTTY;
1280}
1281
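/*
 * Rough userspace-side sketch (not part of this driver) of how a client
 * such as nbd-client typically drives the legacy ioctl interface handled
 * by __nbd_ioctl() above; names and values are illustrative and error
 * handling is omitted:
 *
 *	int dev = open("/dev/nbd0", O_RDWR);
 *	ioctl(dev, NBD_SET_SOCK, sock_fd);          // connected socket to the server
 *	ioctl(dev, NBD_SET_BLKSIZE, 4096UL);        // power of two, 512..PAGE_SIZE
 *	ioctl(dev, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(dev, NBD_SET_FLAGS, server_flags);
 *	ioctl(dev, NBD_DO_IT);                      // blocks until disconnect/timeout
 *	ioctl(dev, NBD_CLEAR_SOCK);
 */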
1282static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
1283 unsigned int cmd, unsigned long arg)
1284{
Wanlong Gaof4507162012-03-28 14:42:51 -07001285 struct nbd_device *nbd = bdev->bd_disk->private_data;
Josef Bacike46c7282017-04-06 17:02:00 -04001286 struct nbd_config *config = nbd->config;
1287 int error = -EINVAL;
Pavel Machek1a2ad212009-04-02 16:58:41 -07001288
1289 if (!capable(CAP_SYS_ADMIN))
1290 return -EPERM;
1291
Josef Bacik1dae69b2017-05-05 22:25:18 -04001292 /* The block layer will pass back some non-nbd ioctls in case we have
1293	 * special handling for them, but we don't, so just return an error.
1294 */
1295 if (_IOC_TYPE(cmd) != 0xab)
1296 return -EINVAL;
1297
Josef Bacik9561a7a2016-11-22 14:04:40 -05001298 mutex_lock(&nbd->config_lock);
Josef Bacike46c7282017-04-06 17:02:00 -04001299
1300	/* Don't allow ioctl operations on an nbd device that was created with
1301 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
1302 */
1303 if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
1304 (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
1305 error = __nbd_ioctl(bdev, nbd, cmd, arg);
1306 else
1307 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
Josef Bacik9561a7a2016-11-22 14:04:40 -05001308 mutex_unlock(&nbd->config_lock);
Pavel Machek1a2ad212009-04-02 16:58:41 -07001309 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310}
1311
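/* Allocate a zeroed nbd_config and pin the module for as long as it exists. */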
Josef Bacik5ea8d102017-04-06 17:01:58 -04001312static struct nbd_config *nbd_alloc_config(void)
1313{
1314 struct nbd_config *config;
1315
1316 config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
1317 if (!config)
1318 return NULL;
1319 atomic_set(&config->recv_threads, 0);
1320 init_waitqueue_head(&config->recv_wq);
Josef Bacik560bc4b2017-04-06 17:02:04 -04001321 init_waitqueue_head(&config->conn_wait);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001322 config->blksize = 1024;
Josef Bacik560bc4b2017-04-06 17:02:04 -04001323 atomic_set(&config->live_connections, 0);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001324 try_module_get(THIS_MODULE);
1325 return config;
1326}
1327
1328static int nbd_open(struct block_device *bdev, fmode_t mode)
1329{
1330 struct nbd_device *nbd;
1331 int ret = 0;
1332
1333 mutex_lock(&nbd_index_mutex);
1334 nbd = bdev->bd_disk->private_data;
1335 if (!nbd) {
1336 ret = -ENXIO;
1337 goto out;
1338 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001339 if (!refcount_inc_not_zero(&nbd->refs)) {
1340 ret = -ENXIO;
1341 goto out;
1342 }
Josef Bacik5ea8d102017-04-06 17:01:58 -04001343 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1344 struct nbd_config *config;
1345
1346 mutex_lock(&nbd->config_lock);
1347 if (refcount_inc_not_zero(&nbd->config_refs)) {
1348 mutex_unlock(&nbd->config_lock);
1349 goto out;
1350 }
1351 config = nbd->config = nbd_alloc_config();
1352 if (!config) {
1353 ret = -ENOMEM;
1354 mutex_unlock(&nbd->config_lock);
1355 goto out;
1356 }
1357 refcount_set(&nbd->config_refs, 1);
Josef Bacikc6a47592017-04-06 17:02:06 -04001358 refcount_inc(&nbd->refs);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001359 mutex_unlock(&nbd->config_lock);
Josef Bacikfe1f9e62018-05-16 14:51:21 -04001360 bdev->bd_invalidated = 1;
1361 } else if (nbd_disconnected(nbd->config)) {
1362 bdev->bd_invalidated = 1;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001363 }
1364out:
1365 mutex_unlock(&nbd_index_mutex);
1366 return ret;
1367}
1368
1369static void nbd_release(struct gendisk *disk, fmode_t mode)
1370{
1371 struct nbd_device *nbd = disk->private_data;
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001372 struct block_device *bdev = bdget_disk(disk, 0);
1373
1374 if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
1375 bdev->bd_openers == 0)
1376 nbd_disconnect_and_put(nbd);
1377
Josef Bacik5ea8d102017-04-06 17:01:58 -04001378 nbd_config_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04001379 nbd_put(nbd);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001380}
1381
Alexey Dobriyan83d5cde2009-09-21 17:01:13 -07001382static const struct block_device_operations nbd_fops =
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383{
1384 .owner = THIS_MODULE,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001385 .open = nbd_open,
1386 .release = nbd_release,
Arnd Bergmann8a6cfeb2010-07-08 10:18:46 +02001387 .ioctl = nbd_ioctl,
Al Viro263a3df2016-01-07 10:04:37 -05001388 .compat_ioctl = nbd_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389};
1390
Markus Pargmann30d53d92015-08-17 08:20:06 +02001391#if IS_ENABLED(CONFIG_DEBUG_FS)
1392
1393static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
1394{
1395 struct nbd_device *nbd = s->private;
1396
1397 if (nbd->task_recv)
1398 seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
Markus Pargmann30d53d92015-08-17 08:20:06 +02001399
1400 return 0;
1401}
1402
1403static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
1404{
1405 return single_open(file, nbd_dbg_tasks_show, inode->i_private);
1406}
1407
1408static const struct file_operations nbd_dbg_tasks_ops = {
1409 .open = nbd_dbg_tasks_open,
1410 .read = seq_read,
1411 .llseek = seq_lseek,
1412 .release = single_release,
1413};
1414
1415static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1416{
1417 struct nbd_device *nbd = s->private;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001418 u32 flags = nbd->config->flags;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001419
1420 seq_printf(s, "Hex: 0x%08x\n\n", flags);
1421
1422 seq_puts(s, "Known flags:\n");
1423
1424 if (flags & NBD_FLAG_HAS_FLAGS)
1425 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
1426 if (flags & NBD_FLAG_READ_ONLY)
1427 seq_puts(s, "NBD_FLAG_READ_ONLY\n");
1428 if (flags & NBD_FLAG_SEND_FLUSH)
1429 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
Shaun McDowell685c9b22017-05-25 23:55:54 -04001430 if (flags & NBD_FLAG_SEND_FUA)
1431 seq_puts(s, "NBD_FLAG_SEND_FUA\n");
Markus Pargmann30d53d92015-08-17 08:20:06 +02001432 if (flags & NBD_FLAG_SEND_TRIM)
1433 seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
1434
1435 return 0;
1436}
1437
1438static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
1439{
1440 return single_open(file, nbd_dbg_flags_show, inode->i_private);
1441}
1442
1443static const struct file_operations nbd_dbg_flags_ops = {
1444 .open = nbd_dbg_flags_open,
1445 .read = seq_read,
1446 .llseek = seq_lseek,
1447 .release = single_release,
1448};
1449
1450static int nbd_dev_dbg_init(struct nbd_device *nbd)
1451{
1452 struct dentry *dir;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001453 struct nbd_config *config = nbd->config;
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001454
1455 if (!nbd_dbg_dir)
1456 return -EIO;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001457
1458 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001459 if (!dir) {
1460 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1461 nbd_name(nbd));
1462 return -EIO;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001463 }
Josef Bacik5ea8d102017-04-06 17:01:58 -04001464 config->dbg_dir = dir;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001465
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001466 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001467 debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
Josef Bacik0eadf372016-09-08 12:33:40 -07001468 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001469 debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
Josef Bacikd366a0f2016-06-08 10:32:10 -04001470 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
Markus Pargmann30d53d92015-08-17 08:20:06 +02001471
1472 return 0;
1473}
1474
1475static void nbd_dev_dbg_close(struct nbd_device *nbd)
1476{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001477 debugfs_remove_recursive(nbd->config->dbg_dir);
Markus Pargmann30d53d92015-08-17 08:20:06 +02001478}
1479
1480static int nbd_dbg_init(void)
1481{
1482 struct dentry *dbg_dir;
1483
1484 dbg_dir = debugfs_create_dir("nbd", NULL);
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001485 if (!dbg_dir)
1486 return -EIO;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001487
1488 nbd_dbg_dir = dbg_dir;
1489
1490 return 0;
1491}
1492
1493static void nbd_dbg_close(void)
1494{
1495 debugfs_remove_recursive(nbd_dbg_dir);
1496}
1497
1498#else /* IS_ENABLED(CONFIG_DEBUG_FS) */
1499
1500static int nbd_dev_dbg_init(struct nbd_device *nbd)
1501{
1502 return 0;
1503}
1504
1505static void nbd_dev_dbg_close(struct nbd_device *nbd)
1506{
1507}
1508
1509static int nbd_dbg_init(void)
1510{
1511 return 0;
1512}
1513
1514static void nbd_dbg_close(void)
1515{
1516}
1517
1518#endif
1519
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001520static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1521 unsigned int hctx_idx, unsigned int numa_node)
Josef Bacikfd8383f2016-09-08 12:33:37 -07001522{
1523 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001524 cmd->nbd = set->driver_data;
Josef Bacikd7d94d42018-07-16 12:11:34 -04001525 cmd->flags = 0;
Josef Bacik8f3ea352018-07-16 12:11:35 -04001526 mutex_init(&cmd->lock);
Josef Bacikfd8383f2016-09-08 12:33:37 -07001527 return 0;
1528}
1529
Eric Biggersf363b082017-03-30 13:39:16 -07001530static const struct blk_mq_ops nbd_mq_ops = {
Josef Bacikfd8383f2016-09-08 12:33:37 -07001531 .queue_rq = nbd_queue_rq,
Christoph Hellwig1e388ae2017-04-20 16:03:06 +02001532 .complete = nbd_complete_rq,
Josef Bacikfd8383f2016-09-08 12:33:37 -07001533 .init_request = nbd_init_request,
Josef Bacik0eadf372016-09-08 12:33:40 -07001534 .timeout = nbd_xmit_timeout,
Josef Bacikfd8383f2016-09-08 12:33:37 -07001535};
1536
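/*
 * Create one nbd device: allocate the gendisk and blk-mq tag set, pick (or
 * reserve) an index in nbd_index_idr, set conservative queue limits and
 * register the disk as /dev/nbd<index>.  Returns the index used or a
 * negative errno.
 */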
Josef Bacikb0d91112017-02-01 16:11:40 -05001537static int nbd_dev_add(int index)
1538{
1539 struct nbd_device *nbd;
1540 struct gendisk *disk;
1541 struct request_queue *q;
1542 int err = -ENOMEM;
1543
1544 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1545 if (!nbd)
1546 goto out;
1547
1548 disk = alloc_disk(1 << part_shift);
1549 if (!disk)
1550 goto out_free_nbd;
1551
1552 if (index >= 0) {
1553 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1554 GFP_KERNEL);
1555 if (err == -ENOSPC)
1556 err = -EEXIST;
1557 } else {
1558 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
1559 if (err >= 0)
1560 index = err;
1561 }
1562 if (err < 0)
1563 goto out_free_disk;
1564
Josef Bacike46c7282017-04-06 17:02:00 -04001565 nbd->index = index;
Josef Bacikb0d91112017-02-01 16:11:40 -05001566 nbd->disk = disk;
1567 nbd->tag_set.ops = &nbd_mq_ops;
1568 nbd->tag_set.nr_hw_queues = 1;
1569 nbd->tag_set.queue_depth = 128;
1570 nbd->tag_set.numa_node = NUMA_NO_NODE;
1571 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
1572 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
1573 BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
1574 nbd->tag_set.driver_data = nbd;
1575
1576 err = blk_mq_alloc_tag_set(&nbd->tag_set);
1577 if (err)
1578 goto out_free_idr;
1579
1580 q = blk_mq_init_queue(&nbd->tag_set);
1581 if (IS_ERR(q)) {
1582 err = PTR_ERR(q);
1583 goto out_free_tags;
1584 }
1585 disk->queue = q;
1586
1587 /*
1588 * Tell the block layer that we are not a rotational device
1589 */
Bart Van Assche8b904b52018-03-07 17:10:10 -08001590 blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
1591 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
Josef Bacik6df133a2018-05-23 13:35:59 -04001592 disk->queue->limits.discard_granularity = 0;
Josef Bacik07ce2132018-06-05 11:41:23 -04001593 disk->queue->limits.discard_alignment = 0;
Josef Bacik6df133a2018-05-23 13:35:59 -04001594 blk_queue_max_discard_sectors(disk->queue, 0);
Josef Bacikebb16d02017-04-18 16:22:51 -04001595 blk_queue_max_segment_size(disk->queue, UINT_MAX);
Josef Bacik1cc1f172017-04-20 15:47:01 -04001596 blk_queue_max_segments(disk->queue, USHRT_MAX);
Josef Bacikb0d91112017-02-01 16:11:40 -05001597 blk_queue_max_hw_sectors(disk->queue, 65536);
1598 disk->queue->limits.max_sectors = 256;
1599
Josef Bacikb0d91112017-02-01 16:11:40 -05001600 mutex_init(&nbd->config_lock);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001601 refcount_set(&nbd->config_refs, 0);
Josef Bacikc6a47592017-04-06 17:02:06 -04001602 refcount_set(&nbd->refs, 1);
1603 INIT_LIST_HEAD(&nbd->list);
Josef Bacikb0d91112017-02-01 16:11:40 -05001604 disk->major = NBD_MAJOR;
1605 disk->first_minor = index << part_shift;
1606 disk->fops = &nbd_fops;
1607 disk->private_data = nbd;
1608 sprintf(disk->disk_name, "nbd%d", index);
Josef Bacikb0d91112017-02-01 16:11:40 -05001609 add_disk(disk);
Josef Bacik47d902b2017-04-06 17:02:05 -04001610 nbd_total_devices++;
Josef Bacikb0d91112017-02-01 16:11:40 -05001611 return index;
1612
1613out_free_tags:
1614 blk_mq_free_tag_set(&nbd->tag_set);
1615out_free_idr:
1616 idr_remove(&nbd_index_idr, index);
1617out_free_disk:
1618 put_disk(disk);
1619out_free_nbd:
1620 kfree(nbd);
1621out:
1622 return err;
1623}
1624
Josef Bacike46c7282017-04-06 17:02:00 -04001625static int find_free_cb(int id, void *ptr, void *data)
1626{
1627 struct nbd_device *nbd = ptr;
1628 struct nbd_device **found = data;
1629
1630 if (!refcount_read(&nbd->config_refs)) {
1631 *found = nbd;
1632 return 1;
1633 }
1634 return 0;
1635}
1636
1637/* Netlink interface. */
Stephen Hemmingera86c4122018-07-18 09:32:43 -07001638static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
Josef Bacike46c7282017-04-06 17:02:00 -04001639 [NBD_ATTR_INDEX] = { .type = NLA_U32 },
1640 [NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
1641 [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
1642 [NBD_ATTR_TIMEOUT] = { .type = NLA_U64 },
1643 [NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 },
1644 [NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 },
1645 [NBD_ATTR_SOCKETS] = { .type = NLA_NESTED},
Josef Bacik560bc4b2017-04-06 17:02:04 -04001646 [NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 },
Josef Bacik47d902b2017-04-06 17:02:05 -04001647 [NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
Josef Bacike46c7282017-04-06 17:02:00 -04001648};
1649
Stephen Hemmingera86c4122018-07-18 09:32:43 -07001650static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
Josef Bacike46c7282017-04-06 17:02:00 -04001651 [NBD_SOCK_FD] = { .type = NLA_U32 },
1652};
1653
Josef Bacik47d902b2017-04-06 17:02:05 -04001654/* We don't use this right now since we don't parse the incoming list, but we
1655 * still want it here so userspace knows what to expect.
1656 */
Stephen Hemmingera86c4122018-07-18 09:32:43 -07001657static const struct nla_policy __attribute__((unused))
Josef Bacik47d902b2017-04-06 17:02:05 -04001658nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
1659 [NBD_DEVICE_INDEX] = { .type = NLA_U32 },
1660 [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
1661};
1662
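/*
 * NBD_CMD_CONNECT handler: look up (or create) the device named by
 * NBD_ATTR_INDEX, build a fresh config from the supplied attributes
 * (size, block size, timeouts, flags), add every fd found in the nested
 * NBD_ATTR_SOCKETS list, start the device and reply with the index that
 * was actually used.
 */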
Josef Bacike46c7282017-04-06 17:02:00 -04001663static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
1664{
1665 struct nbd_device *nbd = NULL;
1666 struct nbd_config *config;
1667 int index = -1;
1668 int ret;
Josef Bacika2c97902017-04-06 17:02:07 -04001669 bool put_dev = false;
Josef Bacike46c7282017-04-06 17:02:00 -04001670
1671 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1672 return -EPERM;
1673
1674 if (info->attrs[NBD_ATTR_INDEX])
1675 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1676 if (!info->attrs[NBD_ATTR_SOCKETS]) {
1677 printk(KERN_ERR "nbd: must specify at least one socket\n");
1678 return -EINVAL;
1679 }
1680 if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
1681 printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
1682 return -EINVAL;
1683 }
1684again:
1685 mutex_lock(&nbd_index_mutex);
1686 if (index == -1) {
1687 ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
1688 if (ret == 0) {
1689 int new_index;
1690 new_index = nbd_dev_add(-1);
1691 if (new_index < 0) {
1692 mutex_unlock(&nbd_index_mutex);
1693 printk(KERN_ERR "nbd: failed to add new device\n");
Gustavo A. R. Silva09799622018-02-12 11:14:55 -06001694 return new_index;
Josef Bacike46c7282017-04-06 17:02:00 -04001695 }
1696 nbd = idr_find(&nbd_index_idr, new_index);
1697 }
1698 } else {
1699 nbd = idr_find(&nbd_index_idr, index);
Josef Bacike6a76272017-08-14 18:25:33 +00001700 if (!nbd) {
1701 ret = nbd_dev_add(index);
1702 if (ret < 0) {
1703 mutex_unlock(&nbd_index_mutex);
1704 printk(KERN_ERR "nbd: failed to add new device\n");
1705 return ret;
1706 }
1707 nbd = idr_find(&nbd_index_idr, index);
1708 }
Josef Bacike46c7282017-04-06 17:02:00 -04001709 }
Josef Bacike46c7282017-04-06 17:02:00 -04001710 if (!nbd) {
1711 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1712 index);
Josef Bacikc6a47592017-04-06 17:02:06 -04001713 mutex_unlock(&nbd_index_mutex);
Josef Bacike46c7282017-04-06 17:02:00 -04001714 return -EINVAL;
1715 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001716 if (!refcount_inc_not_zero(&nbd->refs)) {
1717 mutex_unlock(&nbd_index_mutex);
1718 if (index == -1)
1719 goto again;
1720 printk(KERN_ERR "nbd: device at index %d is going down\n",
1721 index);
1722 return -EINVAL;
1723 }
1724 mutex_unlock(&nbd_index_mutex);
Josef Bacike46c7282017-04-06 17:02:00 -04001725
1726 mutex_lock(&nbd->config_lock);
1727 if (refcount_read(&nbd->config_refs)) {
1728 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001729 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001730 if (index == -1)
1731 goto again;
1732 printk(KERN_ERR "nbd: nbd%d already in use\n", index);
1733 return -EBUSY;
1734 }
1735 if (WARN_ON(nbd->config)) {
1736 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001737 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001738 return -EINVAL;
1739 }
1740 config = nbd->config = nbd_alloc_config();
1741 if (!nbd->config) {
1742 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001743 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001744 printk(KERN_ERR "nbd: couldn't allocate config\n");
1745 return -ENOMEM;
1746 }
1747 refcount_set(&nbd->config_refs, 1);
1748 set_bit(NBD_BOUND, &config->runtime_flags);
1749
1750 if (info->attrs[NBD_ATTR_SIZE_BYTES]) {
1751 u64 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
1752 nbd_size_set(nbd, config->blksize,
1753 div64_u64(bytes, config->blksize));
1754 }
1755 if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
1756 u64 bsize =
1757 nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
1758 nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
1759 }
1760 if (info->attrs[NBD_ATTR_TIMEOUT]) {
1761 u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
1762 nbd->tag_set.timeout = timeout * HZ;
1763 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1764 }
Josef Bacik560bc4b2017-04-06 17:02:04 -04001765 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
1766 config->dead_conn_timeout =
1767 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
1768 config->dead_conn_timeout *= HZ;
1769 }
Josef Bacike46c7282017-04-06 17:02:00 -04001770 if (info->attrs[NBD_ATTR_SERVER_FLAGS])
1771 config->flags =
1772 nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
Josef Bacika2c97902017-04-06 17:02:07 -04001773 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
1774 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
1775 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
1776 set_bit(NBD_DESTROY_ON_DISCONNECT,
1777 &config->runtime_flags);
1778 put_dev = true;
1779 }
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001780 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
1781 set_bit(NBD_DISCONNECT_ON_CLOSE,
1782 &config->runtime_flags);
1783 }
Josef Bacika2c97902017-04-06 17:02:07 -04001784 }
1785
Josef Bacike46c7282017-04-06 17:02:00 -04001786 if (info->attrs[NBD_ATTR_SOCKETS]) {
1787 struct nlattr *attr;
1788 int rem, fd;
1789
1790 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
1791 rem) {
1792 struct nlattr *socks[NBD_SOCK_MAX+1];
1793
1794 if (nla_type(attr) != NBD_SOCK_ITEM) {
1795 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
1796 ret = -EINVAL;
1797 goto out;
1798 }
1799 ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
Linus Torvalds8d65b082017-05-02 16:40:27 -07001800 nbd_sock_policy, info->extack);
Josef Bacike46c7282017-04-06 17:02:00 -04001801 if (ret != 0) {
1802 printk(KERN_ERR "nbd: error processing sock list\n");
1803 ret = -EINVAL;
1804 goto out;
1805 }
1806 if (!socks[NBD_SOCK_FD])
1807 continue;
1808 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
1809 ret = nbd_add_socket(nbd, fd, true);
1810 if (ret)
1811 goto out;
1812 }
1813 }
1814 ret = nbd_start_device(nbd);
1815out:
1816 mutex_unlock(&nbd->config_lock);
1817 if (!ret) {
1818 set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags);
1819 refcount_inc(&nbd->config_refs);
1820 nbd_connect_reply(info, nbd->index);
1821 }
1822 nbd_config_put(nbd);
Josef Bacika2c97902017-04-06 17:02:07 -04001823 if (put_dev)
1824 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001825 return ret;
1826}
1827
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001828static void nbd_disconnect_and_put(struct nbd_device *nbd)
1829{
1830 mutex_lock(&nbd->config_lock);
1831 nbd_disconnect(nbd);
1832 nbd_clear_sock(nbd);
1833 mutex_unlock(&nbd->config_lock);
1834 if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
1835 &nbd->config->runtime_flags))
1836 nbd_config_put(nbd);
1837}
1838
Josef Bacike46c7282017-04-06 17:02:00 -04001839static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
1840{
1841 struct nbd_device *nbd;
1842 int index;
1843
1844 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1845 return -EPERM;
1846
1847 if (!info->attrs[NBD_ATTR_INDEX]) {
1848 printk(KERN_ERR "nbd: must specify an index to disconnect\n");
1849 return -EINVAL;
1850 }
1851 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1852 mutex_lock(&nbd_index_mutex);
1853 nbd = idr_find(&nbd_index_idr, index);
Josef Bacike46c7282017-04-06 17:02:00 -04001854 if (!nbd) {
Josef Bacikc6a47592017-04-06 17:02:06 -04001855 mutex_unlock(&nbd_index_mutex);
Josef Bacike46c7282017-04-06 17:02:00 -04001856 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1857 index);
1858 return -EINVAL;
1859 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001860 if (!refcount_inc_not_zero(&nbd->refs)) {
1861 mutex_unlock(&nbd_index_mutex);
1862 printk(KERN_ERR "nbd: device at index %d is going down\n",
1863 index);
1864 return -EINVAL;
1865 }
1866 mutex_unlock(&nbd_index_mutex);
1867 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1868 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001869 return 0;
Josef Bacikc6a47592017-04-06 17:02:06 -04001870 }
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001871 nbd_disconnect_and_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001872 nbd_config_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04001873 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001874 return 0;
1875}
1876
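/*
 * NBD_CMD_RECONFIGURE handler: for an already-running device, update the
 * timeouts and client flags and re-add sockets for connections that have
 * died (via nbd_reconnect_socket).
 */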
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001877static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
1878{
1879 struct nbd_device *nbd = NULL;
1880 struct nbd_config *config;
1881 int index;
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001882 int ret = 0;
Josef Bacika2c97902017-04-06 17:02:07 -04001883 bool put_dev = false;
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001884
1885 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1886 return -EPERM;
1887
1888 if (!info->attrs[NBD_ATTR_INDEX]) {
1889 printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
1890 return -EINVAL;
1891 }
1892 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1893 mutex_lock(&nbd_index_mutex);
1894 nbd = idr_find(&nbd_index_idr, index);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001895 if (!nbd) {
Josef Bacikc6a47592017-04-06 17:02:06 -04001896 mutex_unlock(&nbd_index_mutex);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001897 printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
1898 index);
1899 return -EINVAL;
1900 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001901 if (!refcount_inc_not_zero(&nbd->refs)) {
1902 mutex_unlock(&nbd_index_mutex);
1903 printk(KERN_ERR "nbd: device at index %d is going down\n",
1904 index);
1905 return -EINVAL;
1906 }
1907 mutex_unlock(&nbd_index_mutex);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001908
1909 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1910 dev_err(nbd_to_dev(nbd),
1911 "not configured, cannot reconfigure\n");
Josef Bacikc6a47592017-04-06 17:02:06 -04001912 nbd_put(nbd);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001913 return -EINVAL;
1914 }
1915
1916 mutex_lock(&nbd->config_lock);
1917 config = nbd->config;
1918 if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
1919 !nbd->task_recv) {
1920 dev_err(nbd_to_dev(nbd),
1921 "not configured, cannot reconfigure\n");
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001922 ret = -EINVAL;
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001923 goto out;
1924 }
1925
1926 if (info->attrs[NBD_ATTR_TIMEOUT]) {
1927 u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
1928 nbd->tag_set.timeout = timeout * HZ;
1929 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1930 }
Josef Bacik560bc4b2017-04-06 17:02:04 -04001931 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
1932 config->dead_conn_timeout =
1933 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
1934 config->dead_conn_timeout *= HZ;
1935 }
Josef Bacika2c97902017-04-06 17:02:07 -04001936 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
1937 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
1938 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
1939 if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
1940 &config->runtime_flags))
1941 put_dev = true;
1942 } else {
1943 if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
1944 &config->runtime_flags))
1945 refcount_inc(&nbd->refs);
1946 }
Doron Roberts-Kedes08ba91e2018-06-15 14:05:32 -07001947
1948 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
1949 set_bit(NBD_DISCONNECT_ON_CLOSE,
1950 &config->runtime_flags);
1951 } else {
1952 clear_bit(NBD_DISCONNECT_ON_CLOSE,
1953 &config->runtime_flags);
1954 }
Josef Bacika2c97902017-04-06 17:02:07 -04001955 }
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001956
1957 if (info->attrs[NBD_ATTR_SOCKETS]) {
1958 struct nlattr *attr;
1959 int rem, fd;
1960
1961 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
1962 rem) {
1963 struct nlattr *socks[NBD_SOCK_MAX+1];
1964
1965 if (nla_type(attr) != NBD_SOCK_ITEM) {
1966 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
1967 ret = -EINVAL;
1968 goto out;
1969 }
1970 ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
Linus Torvalds8d65b082017-05-02 16:40:27 -07001971 nbd_sock_policy, info->extack);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001972 if (ret != 0) {
1973 printk(KERN_ERR "nbd: error processing sock list\n");
1974 ret = -EINVAL;
1975 goto out;
1976 }
1977 if (!socks[NBD_SOCK_FD])
1978 continue;
1979 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
1980 ret = nbd_reconnect_socket(nbd, fd);
1981 if (ret) {
1982 if (ret == -ENOSPC)
1983 ret = 0;
1984 goto out;
1985 }
1986 dev_info(nbd_to_dev(nbd), "reconnected socket\n");
1987 }
1988 }
1989out:
1990 mutex_unlock(&nbd->config_lock);
1991 nbd_config_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04001992 nbd_put(nbd);
Josef Bacika2c97902017-04-06 17:02:07 -04001993 if (put_dev)
1994 nbd_put(nbd);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001995 return ret;
1996}
1997
Josef Bacike46c7282017-04-06 17:02:00 -04001998static const struct genl_ops nbd_connect_genl_ops[] = {
1999 {
2000 .cmd = NBD_CMD_CONNECT,
2001 .policy = nbd_attr_policy,
2002 .doit = nbd_genl_connect,
2003 },
2004 {
2005 .cmd = NBD_CMD_DISCONNECT,
2006 .policy = nbd_attr_policy,
2007 .doit = nbd_genl_disconnect,
2008 },
Josef Bacikb7aa3d32017-04-06 17:02:01 -04002009 {
2010 .cmd = NBD_CMD_RECONFIGURE,
2011 .policy = nbd_attr_policy,
2012 .doit = nbd_genl_reconfigure,
2013 },
Josef Bacik47d902b2017-04-06 17:02:05 -04002014 {
2015 .cmd = NBD_CMD_STATUS,
2016 .policy = nbd_attr_policy,
2017 .doit = nbd_genl_status,
2018 },
Josef Bacike46c7282017-04-06 17:02:00 -04002019};
2020
Josef Bacik799f9a32017-04-06 17:02:02 -04002021static const struct genl_multicast_group nbd_mcast_grps[] = {
2022 { .name = NBD_GENL_MCAST_GROUP_NAME, },
2023};
2024
Josef Bacike46c7282017-04-06 17:02:00 -04002025static struct genl_family nbd_genl_family __ro_after_init = {
2026 .hdrsize = 0,
2027 .name = NBD_GENL_FAMILY_NAME,
2028 .version = NBD_GENL_VERSION,
2029 .module = THIS_MODULE,
2030 .ops = nbd_connect_genl_ops,
2031 .n_ops = ARRAY_SIZE(nbd_connect_genl_ops),
2032 .maxattr = NBD_ATTR_MAX,
Josef Bacik799f9a32017-04-06 17:02:02 -04002033 .mcgrps = nbd_mcast_grps,
2034 .n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
Josef Bacike46c7282017-04-06 17:02:00 -04002035};
2036
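/*
 * Emit one NBD_DEVICE_ITEM (index + connected flag) into a status reply;
 * used both for single-device queries and for dumping the whole idr.
 */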
Josef Bacik47d902b2017-04-06 17:02:05 -04002037static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
2038{
2039 struct nlattr *dev_opt;
2040 u8 connected = 0;
2041 int ret;
2042
2043	/* This is a little racy, but for status it's ok. The
2044 * reason we don't take a ref here is because we can't
2045 * take a ref in the index == -1 case as we would need
2046 * to put under the nbd_index_mutex, which could
2047 * deadlock if we are configured to remove ourselves
2048 * once we're disconnected.
2049 */
2050 if (refcount_read(&nbd->config_refs))
2051 connected = 1;
2052 dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM);
2053 if (!dev_opt)
2054 return -EMSGSIZE;
2055 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
2056 if (ret)
2057 return -EMSGSIZE;
2058 ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
2059 connected);
2060 if (ret)
2061 return -EMSGSIZE;
2062 nla_nest_end(reply, dev_opt);
2063 return 0;
2064}
2065
2066static int status_cb(int id, void *ptr, void *data)
2067{
2068 struct nbd_device *nbd = ptr;
2069 return populate_nbd_status(nbd, (struct sk_buff *)data);
2070}
2071
2072static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
2073{
2074 struct nlattr *dev_list;
2075 struct sk_buff *reply;
2076 void *reply_head;
2077 size_t msg_size;
2078 int index = -1;
2079 int ret = -ENOMEM;
2080
2081 if (info->attrs[NBD_ATTR_INDEX])
2082 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2083
2084 mutex_lock(&nbd_index_mutex);
2085
2086 msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
2087 nla_attr_size(sizeof(u8)));
2088 msg_size *= (index == -1) ? nbd_total_devices : 1;
2089
2090 reply = genlmsg_new(msg_size, GFP_KERNEL);
2091 if (!reply)
2092 goto out;
2093 reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
2094 NBD_CMD_STATUS);
2095 if (!reply_head) {
2096 nlmsg_free(reply);
2097 goto out;
2098 }
2099
2100 dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST);
2101 if (index == -1) {
2102 ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
2103 if (ret) {
2104 nlmsg_free(reply);
2105 goto out;
2106 }
2107 } else {
2108 struct nbd_device *nbd;
2109 nbd = idr_find(&nbd_index_idr, index);
2110 if (nbd) {
2111 ret = populate_nbd_status(nbd, reply);
2112 if (ret) {
2113 nlmsg_free(reply);
2114 goto out;
2115 }
2116 }
2117 }
2118 nla_nest_end(reply, dev_list);
2119 genlmsg_end(reply, reply_head);
2120 genlmsg_reply(reply, info);
2121 ret = 0;
2122out:
2123 mutex_unlock(&nbd_index_mutex);
2124 return ret;
2125}
2126
Josef Bacike46c7282017-04-06 17:02:00 -04002127static void nbd_connect_reply(struct genl_info *info, int index)
2128{
2129 struct sk_buff *skb;
2130 void *msg_head;
2131 int ret;
2132
2133 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2134 if (!skb)
2135 return;
2136 msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
2137 NBD_CMD_CONNECT);
2138 if (!msg_head) {
2139 nlmsg_free(skb);
2140 return;
2141 }
2142 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2143 if (ret) {
2144 nlmsg_free(skb);
2145 return;
2146 }
2147 genlmsg_end(skb, msg_head);
2148 genlmsg_reply(skb, info);
2149}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150
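/*
 * Broadcast an NBD_CMD_LINK_DEAD notification carrying the device index on
 * the nbd multicast group so userspace can react to a dead connection.
 */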
Josef Bacik799f9a32017-04-06 17:02:02 -04002151static void nbd_mcast_index(int index)
2152{
2153 struct sk_buff *skb;
2154 void *msg_head;
2155 int ret;
2156
2157 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2158 if (!skb)
2159 return;
2160 msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
2161 NBD_CMD_LINK_DEAD);
2162 if (!msg_head) {
2163 nlmsg_free(skb);
2164 return;
2165 }
2166 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2167 if (ret) {
2168 nlmsg_free(skb);
2169 return;
2170 }
2171 genlmsg_end(skb, msg_head);
2172 genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
2173}
2174
2175static void nbd_dead_link_work(struct work_struct *work)
2176{
2177 struct link_dead_args *args = container_of(work, struct link_dead_args,
2178 work);
2179 nbd_mcast_index(args->index);
2180 kfree(args);
2181}
2182
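/*
 * Module init: validate max_part/nbds_max, create the receive workqueue,
 * register the block major and the netlink family, then pre-create
 * nbds_max devices.
 */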
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183static int __init nbd_init(void)
2184{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 int i;
2186
Adrian Bunk5b7b18c2006-03-25 03:07:04 -08002187 BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002189 if (max_part < 0) {
WANG Cong7742ce42011-08-19 14:48:28 +02002190 printk(KERN_ERR "nbd: max_part must be >= 0\n");
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002191 return -EINVAL;
2192 }
2193
2194 part_shift = 0;
Namhyung Kim5988ce22011-05-28 14:44:46 +02002195 if (max_part > 0) {
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002196 part_shift = fls(max_part);
2197
Namhyung Kim5988ce22011-05-28 14:44:46 +02002198 /*
2199 * Adjust max_part according to part_shift as it is exported
2199		 * to user space so that users can know the max number of
2200		 * partitions the kernel should be able to manage.
2202 *
2203 * Note that -1 is required because partition 0 is reserved
2204 * for the whole disk.
2205 */
2206 max_part = (1UL << part_shift) - 1;
2207 }
2208
Namhyung Kim3b271082011-05-28 14:44:46 +02002209 if ((1UL << part_shift) > DISK_MAX_PARTS)
2210 return -EINVAL;
2211
2212 if (nbds_max > 1UL << (MINORBITS - part_shift))
2213 return -EINVAL;
Josef Bacik124d6db2017-02-01 16:11:11 -05002214 recv_workqueue = alloc_workqueue("knbd-recv",
Dan Melnic2189c972017-09-18 13:08:51 -07002215 WQ_MEM_RECLAIM | WQ_HIGHPRI |
2216 WQ_UNBOUND, 0);
Josef Bacik124d6db2017-02-01 16:11:11 -05002217 if (!recv_workqueue)
2218 return -ENOMEM;
Namhyung Kim3b271082011-05-28 14:44:46 +02002219
Josef Bacik6330a2d2017-02-15 16:49:48 -05002220 if (register_blkdev(NBD_MAJOR, "nbd")) {
2221 destroy_workqueue(recv_workqueue);
Josef Bacikb0d91112017-02-01 16:11:40 -05002222 return -EIO;
Josef Bacik6330a2d2017-02-15 16:49:48 -05002223 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224
Josef Bacike46c7282017-04-06 17:02:00 -04002225 if (genl_register_family(&nbd_genl_family)) {
2226 unregister_blkdev(NBD_MAJOR, "nbd");
2227 destroy_workqueue(recv_workqueue);
2228 return -EINVAL;
2229 }
Markus Pargmann30d53d92015-08-17 08:20:06 +02002230 nbd_dbg_init();
2231
Josef Bacikb0d91112017-02-01 16:11:40 -05002232 mutex_lock(&nbd_index_mutex);
2233 for (i = 0; i < nbds_max; i++)
2234 nbd_dev_add(i);
2235 mutex_unlock(&nbd_index_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 return 0;
Josef Bacikb0d91112017-02-01 16:11:40 -05002237}
2238
2239static int nbd_exit_cb(int id, void *ptr, void *data)
2240{
Josef Bacikc6a47592017-04-06 17:02:06 -04002241 struct list_head *list = (struct list_head *)data;
Josef Bacikb0d91112017-02-01 16:11:40 -05002242 struct nbd_device *nbd = ptr;
Josef Bacikc6a47592017-04-06 17:02:06 -04002243
Josef Bacikc6a47592017-04-06 17:02:06 -04002244 list_add_tail(&nbd->list, list);
Josef Bacikb0d91112017-02-01 16:11:40 -05002245 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246}
2247
2248static void __exit nbd_cleanup(void)
2249{
Josef Bacikc6a47592017-04-06 17:02:06 -04002250 struct nbd_device *nbd;
2251 LIST_HEAD(del_list);
2252
Markus Pargmann30d53d92015-08-17 08:20:06 +02002253 nbd_dbg_close();
2254
Josef Bacikc6a47592017-04-06 17:02:06 -04002255 mutex_lock(&nbd_index_mutex);
2256 idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
2257 mutex_unlock(&nbd_index_mutex);
2258
Josef Bacik60ae36a2017-04-28 09:49:19 -04002259 while (!list_empty(&del_list)) {
2260 nbd = list_first_entry(&del_list, struct nbd_device, list);
2261 list_del_init(&nbd->list);
2262 if (refcount_read(&nbd->refs) != 1)
Josef Bacikc6a47592017-04-06 17:02:06 -04002263 printk(KERN_ERR "nbd: possibly leaking a device\n");
2264 nbd_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04002265 }
2266
Josef Bacikb0d91112017-02-01 16:11:40 -05002267 idr_destroy(&nbd_index_idr);
Josef Bacike46c7282017-04-06 17:02:00 -04002268 genl_unregister_family(&nbd_genl_family);
Josef Bacik124d6db2017-02-01 16:11:11 -05002269 destroy_workqueue(recv_workqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270 unregister_blkdev(NBD_MAJOR, "nbd");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271}
2272
2273module_init(nbd_init);
2274module_exit(nbd_cleanup);
2275
2276MODULE_DESCRIPTION("Network Block Device");
2277MODULE_LICENSE("GPL");
2278
Lars Marowsky-Bree40be0c22005-05-01 08:59:07 -07002279module_param(nbds_max, int, 0444);
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002280MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
2281module_param(max_part, int, 0444);
Josef Bacik7a8362a2017-08-14 18:56:16 +00002282MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");