/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);

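/*
 * Per-connection state. tx_lock serializes senders on the socket;
 * pending/sent remember a partially transmitted request so it can be
 * resumed; cookie is bumped whenever the socket is replaced, so commands
 * sent on an old socket can be told apart from ones sent on the new one.
 */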
struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

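/* Bit numbers for nbd_config->runtime_flags */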
#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_HAS_PID_FILE		3
#define NBD_HAS_CONFIG_REF		4
#define NBD_BOUND			5

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;

	struct nbd_sock **socks;
	int num_connections;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;

	struct task_struct *task_recv;
	struct task_struct *task_setup;
};

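/*
 * Per-request state, carried in the blk-mq PDU (see blk_mq_rq_to_pdu()).
 * index and cookie record which socket the request was sent out on.
 */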
struct nbd_cmd {
	struct nbd_device *nbd;
	int index;
	int cookie;
	struct completion send_complete;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static void nbd_dead_link_work(struct work_struct *work);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};

static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
}

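/*
 * Mark a socket dead: shut it down, forget any partially sent request,
 * and (if @notify is set and we are not disconnecting) queue
 * nbd_dead_link_work so the dead link can be reported.
 */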
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead)
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}

static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}

static void nbd_size_update(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, config->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	nbd_size_update(nbd);
}

static void nbd_end_request(struct nbd_cmd *cmd)
{
	struct nbd_device *nbd = cmd->nbd;
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int error = req->errors ? -EIO : 0;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
		error ? "failed" : "done");

	blk_mq_complete_request(req, error);
}

/*
 * Forcibly shut down the socket, causing all listeners to error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

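/*
 * blk-mq timeout handler. With multiple connections, mark the socket the
 * command went out on dead and requeue the request so the submit path can
 * move it to a live socket; with a single connection, fail the request
 * and tear the socket down.
 */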
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		req->errors = -EIO;
		return BLK_EH_HANDLED;
	}

	config = nbd->config;

	if (config->num_connections > 1) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying\n");
		/*
		 * We have more connections, so requeue this IO; the
		 * submit path will put it on a live connection.
		 */
		if (config->socks && config->num_connections > 1) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket; only
				 * mark it dead if it's the same socket the
				 * request was sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			blk_mq_requeue_request(req, true);
			nbd_config_put(nbd);
			return BLK_EH_NOT_HANDLED;
		}
	} else {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out\n");
	}
	set_bit(NBD_TIMEDOUT, &config->runtime_flags);
	req->errors = -EIO;
	sock_shutdown(nbd);
	nbd_config_put(nbd);

	return BLK_EH_HANDLED;
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted %s on closed socket in sock_xmit\n",
				    (send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 tag = blk_mq_unique_tag(req);
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);
			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	if (result <= 0) {
		if (result == -ERESTARTSYS) {
			/* If we haven't sent anything we can just return BUSY;
			 * however, if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			return BLK_MQ_RQ_QUEUE_BUSY;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			iov_iter_bvec(&from, ITER_BVEC | WRITE,
				      &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (result == -ERESTARTSYS) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					return BLK_MQ_RQ_QUEUE_BUSY;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}

/* An ERR_PTR returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;

	reply.magic = 0;
	iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors = -EIO;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, ITER_BVEC | READ,
				      &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected or we only have 1
				 * connection then we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config) ||
				    config->num_connections <= 1) {
					req->errors = -EIO;
					return cmd;
				}
				return ERR_PTR(-EIO);
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;
	int ret = 0;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			ret = PTR_ERR(cmd);
			break;
		}

		nbd_end_request(cmd);
	}
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	nbd_config_put(nbd);
	kfree(args);
}

static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	req->errors = -EIO;
	nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

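/*
 * Pick a live socket to retry on. The cached fallback_index is reused
 * while that socket is still alive; otherwise all sockets are scanned
 * for a live one and the result is cached. Returns -1 if none exists.
 */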
static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}

static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		return -EINVAL;
	}
	req->errors = 0;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		index = find_fallback(nbd, index);
		if (index < 0) {
			ret = -EIO;
			goto out;
		}
		mutex_unlock(&nsock->tx_lock);
		goto again;
	}

	/* Handle the case where we have a pending request that was partially
	 * transmitted and _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	if (unlikely(nsock->pending && nsock->pending != req)) {
		blk_mq_requeue_request(req, true);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, trying another connection\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		mutex_unlock(&nsock->tx_lock);
		goto again;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}

static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bios to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (i.e. we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);
	blk_mq_start_request(bd->rq);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_MQ_RQ_QUEUE_ERROR;
	if (!ret)
		ret = BLK_MQ_RQ_QUEUE_OK;
	complete(&cmd->send_complete);

	return ret;
}

static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being set up by another task");
		sockfd_put(sock);
		return -EBUSY;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		sockfd_put(sock);
		return -ENOMEM;
	}
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;

	return 0;
}

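/*
 * Swap a dead socket for a freshly connected one supplied by userspace.
 * Bumping nsock->cookie lets the timeout handler tell requests sent on
 * the old socket apart from ones sent on the replacement.
 */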
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		/* We take the tx_lock in an error path in recv_work, so we
		 * need to queue_work outside of the tx_lock.
		 */
		queue_work(recv_workqueue, &args->work);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}

/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	nbd->config = NULL;
	nbd->tag_set.timeout = 0;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_size(bdev, 0);
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

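/* Ask the server to hang up by sending NBD_CMD_DISC on every connection. */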
static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
	}
}

static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
			      &config->runtime_flags))
		send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}

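/*
 * Drop a reference on the config. The final put tears down debugfs and
 * the pid file, shuts down and frees all sockets, and releases the
 * module reference taken in nbd_alloc_config().
 */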
static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd);
		if (test_and_clear_bit(NBD_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->task_recv = NULL;
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		nbd_reset(nbd);
		mutex_unlock(&nbd->config_lock);
		module_put(THIS_MODULE);
	}
}

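/*
 * Start the device: validate the socket array, publish the pid file and
 * debugfs entries, and queue one receive worker per connection.
 */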
static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->task_recv = current;

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		return error;
	}
	set_bit(NBD_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->index = i;
		queue_work(recv_workqueue, &args->work);
	}
	return error;
}

static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	bd_set_size(bdev, config->bytesize);
	if (max_part)
		bdev->bd_invalidated = 1;
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
				       atomic_read(&config->recv_threads) == 0);
	if (ret)
		sock_shutdown(nbd);
	mutex_lock(&nbd->config_lock);
	bd_set_size(bdev, 0);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}

static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
{
	nbd_clear_sock(nbd);
	kill_bdev(bdev);
	nbd_bdev_reset(bdev);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

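/*
 * A rough sketch of how a userspace client typically drives the ioctl
 * interface below (the negotiation with the server is up to the client,
 * e.g. nbd-client):
 *
 *	sock = socket(...);			   connect to the NBD server
 *	ioctl(nbd_fd, NBD_SET_BLKSIZE, 4096);
 *	ioctl(nbd_fd, NBD_SET_SIZE_BLOCKS, nblocks);
 *	ioctl(nbd_fd, NBD_SET_FLAGS, flags);	   flags from negotiation
 *	ioctl(nbd_fd, NBD_SET_SOCK, sock);
 *	ioctl(nbd_fd, NBD_DO_IT);		   blocks until disconnect
 */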
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd, bdev);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		nbd_size_set(nbd, arg,
			     div_s64(config->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, config->blksize,
			     div_s64(arg, config->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, config->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		if (arg) {
			nbd->tag_set.timeout = arg * HZ;
			blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
		}
		return 0;

	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on an nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}

static struct nbd_config *nbd_alloc_config(void)
{
	struct nbd_config *config;

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config)
		return NULL;
	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	config->blksize = 1024;
	try_module_get(THIS_MODULE);
	return config;
}

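/*
 * Opening the device takes a config reference; the first opener
 * allocates a fresh, empty nbd_config.
 */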
static int nbd_open(struct block_device *bdev, fmode_t mode)
{
	struct nbd_device *nbd;
	int ret = 0;

	mutex_lock(&nbd_index_mutex);
	nbd = bdev->bd_disk->private_data;
	if (!nbd) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		struct nbd_config *config;

		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		config = nbd->config = nbd_alloc_config();
		if (!config) {
			ret = -ENOMEM;
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		refcount_set(&nbd->config_refs, 1);
		mutex_unlock(&nbd->config_lock);
	}
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

1176static void nbd_release(struct gendisk *disk, fmode_t mode)
1177{
1178 struct nbd_device *nbd = disk->private_data;
1179 nbd_config_put(nbd);
1180}
1181
Alexey Dobriyan83d5cde2009-09-21 17:01:13 -07001182static const struct block_device_operations nbd_fops =
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183{
1184 .owner = THIS_MODULE,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001185 .open = nbd_open,
1186 .release = nbd_release,
Arnd Bergmann8a6cfeb2010-07-08 10:18:46 +02001187 .ioctl = nbd_ioctl,
Al Viro263a3df2016-01-07 10:04:37 -05001188 .compat_ioctl = nbd_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189};
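
/*
 * Debugfs support. When CONFIG_DEBUG_FS is enabled, each device exposes
 * tasks, flags, size_bytes, timeout and blocksize under
 * <debugfs>/nbd/<devname>/, typically /sys/kernel/debug/nbd/nbd0/.
 */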
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct nbd_config *config = nbd->config;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->config->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

static int nbd_init_request(void *data, struct request *rq,
			    unsigned int hctx_idx, unsigned int request_idx,
			    unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->nbd = data;
	return 0;
}

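/*
 * BLK_MQ_F_BLOCKING is set on the tag set in nbd_dev_add() because
 * nbd_queue_rq() transmits the request over a socket and may therefore
 * sleep, so the block layer must invoke it from process context.
 */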
static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq = nbd_queue_rq,
	.init_request = nbd_init_request,
	.timeout = nbd_xmit_timeout,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;

	if (disk) {
		del_gendisk(disk);
		blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&nbd->tag_set);
		put_disk(disk);
	}
	kfree(nbd);
}

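/*
 * Create a single nbd device: allocate the gendisk and the blk-mq tag
 * set (one hardware queue, depth 128), register the device in
 * nbd_index_idr and publish it as /dev/nbd<index>. Passing a negative
 * index asks the idr to pick the first free slot.
 */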
static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->index = index;
	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 512;
	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	nbd_reset(nbd);
	add_disk(disk);
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}

static int find_free_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	struct nbd_device **found = data;

	if (!refcount_read(&nbd->config_refs)) {
		*found = nbd;
		return 1;
	}
	return 0;
}

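/*
 * Sketch of the attribute layout a connect request is expected to carry
 * (values are illustrative; see nbd_genl_connect() for the actual
 * parsing):
 *
 *	NBD_CMD_CONNECT
 *	  NBD_ATTR_INDEX		u32, optional - pick first free
 *	  NBD_ATTR_SIZE_BYTES		u64, required
 *	  NBD_ATTR_BLOCK_SIZE_BYTES	u64, optional
 *	  NBD_ATTR_SOCKETS		nested, required
 *	    NBD_SOCK_ITEM
 *	      NBD_SOCK_FD		u32, a connected socket fd
 *	    NBD_SOCK_ITEM
 *	      NBD_SOCK_FD		one item per link
 */
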
/* Netlink interface. */
static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX] = { .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT] = { .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 },
	[NBD_ATTR_SOCKETS] = { .type = NLA_NESTED },
};

static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD] = { .type = NLA_U32 },
};

static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index = -1;
	int ret;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	if (!info->attrs[NBD_ATTR_SOCKETS]) {
		printk(KERN_ERR "nbd: must specify at least one socket\n");
		return -EINVAL;
	}
	if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
		printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
		if (ret == 0) {
			int new_index;
			new_index = nbd_dev_add(-1);
			if (new_index < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				/* Propagate the real error, not the 0 left
				 * in ret by idr_for_each() above.
				 */
				return new_index;
			}
			nbd = idr_find(&nbd_index_idr, new_index);
		}
	} else {
		nbd = idr_find(&nbd_index_idr, index);
	}
	mutex_unlock(&nbd_index_mutex);
	if (!nbd) {
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: nbd%d already in use\n", index);
		return -EBUSY;
	}
	if (WARN_ON(nbd->config)) {
		mutex_unlock(&nbd->config_lock);
		return -EINVAL;
	}
	config = nbd->config = nbd_alloc_config();
	if (!nbd->config) {
		mutex_unlock(&nbd->config_lock);
		printk(KERN_ERR "nbd: couldn't allocate config\n");
		return -ENOMEM;
	}
	refcount_set(&nbd->config_refs, 1);
	set_bit(NBD_BOUND, &config->runtime_flags);

	if (info->attrs[NBD_ATTR_SIZE_BYTES]) {
		u64 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
		nbd_size_set(nbd, config->blksize,
			     div64_u64(bytes, config->blksize));
	}
	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
		u64 bsize =
			nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
		nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
	}
	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX + 1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}
	ret = nbd_start_device(nbd);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	return ret;
}
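
/*
 * NBD_CMD_DISCONNECT tears a device down by index. Note the two
 * nbd_config_put() calls at the end: the first drops the long-lived
 * reference taken at connect time (guarded by NBD_HAS_CONFIG_REF so a
 * repeated disconnect cannot drop it twice), the second drops the
 * reference taken just above.
 */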
static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	mutex_unlock(&nbd_index_mutex);
	if (!nbd) {
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->config_refs))
		return 0;
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	mutex_unlock(&nbd->config_lock);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
	nbd_config_put(nbd);
	return 0;
}
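
/*
 * NBD_CMD_RECONFIGURE adjusts the timeout of a running device and, more
 * importantly, feeds it replacement socket fds so that dead links can be
 * revived without tearing the whole device down.
 */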
static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = -EINVAL;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	mutex_unlock(&nbd_index_mutex);
	if (!nbd) {
		printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
		       index);
		return -EINVAL;
	}

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	config = nbd->config;
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
	    !nbd->task_recv) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		goto out;
	}

	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX + 1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				/* -ENOSPC means there was no dead link to
				 * replace; treat that as success rather
				 * than an error.
				 */
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	return ret;
}

static const struct genl_ops nbd_connect_genl_ops[] = {
	{
		.cmd = NBD_CMD_CONNECT,
		.policy = nbd_attr_policy,
		.doit = nbd_genl_connect,
	},
	{
		.cmd = NBD_CMD_DISCONNECT,
		.policy = nbd_attr_policy,
		.doit = nbd_genl_disconnect,
	},
	{
		.cmd = NBD_CMD_RECONFIGURE,
		.policy = nbd_attr_policy,
		.doit = nbd_genl_reconfigure,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = NBD_GENL_FAMILY_NAME,
	.version = NBD_GENL_VERSION,
	.module = THIS_MODULE,
	.ops = nbd_connect_genl_ops,
	.n_ops = ARRAY_SIZE(nbd_connect_genl_ops),
	.maxattr = NBD_ATTR_MAX,
	.mcgrps = nbd_mcast_grps,
	.n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
};

static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}
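
/*
 * When a link dies, nbd_dead_link_work() below multicasts an
 * NBD_CMD_LINK_DEAD message carrying the device index to the
 * NBD_GENL_MCAST_GROUP_NAME group, so a userspace daemon can open a new
 * socket and hand it back with NBD_CMD_RECONFIGURE.
 */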
static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}

static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);

	nbd_mcast_index(args->index);
	kfree(args);
}
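
/*
 * Worked example of the minor-number math below: loading the module with
 * "modprobe nbd nbds_max=4 max_part=15" gives part_shift = fls(15) = 4,
 * max_part is then rounded to (1 << 4) - 1 = 15 usable partitions, and
 * each device consumes 1 << 4 = 16 minors (partition 0 is the whole
 * disk), so nbds_max is capped at 1 << (MINORBITS - 4).
 */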
static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that users can know the maximum number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	recv_workqueue = alloc_workqueue("knbd-recv",
					 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!recv_workqueue)
		return -ENOMEM;

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		destroy_workqueue(recv_workqueue);
		return -EIO;
	}

	if (genl_register_family(&nbd_genl_family)) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		destroy_workqueue(recv_workqueue);
		return -EINVAL;
	}
	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;

	nbd_dev_remove(nbd);
	return 0;
}

static void __exit nbd_cleanup(void)
{
	nbd_dbg_close();

	idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL);
	idr_destroy(&nbd_index_idr);
	genl_unregister_family(&nbd_genl_family);
	destroy_workqueue(recv_workqueue);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");