/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;

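/*
 * Per-connection state.  tx_lock serializes senders on this socket;
 * pending/sent record a partially transmitted request so it can be
 * resumed later; dead marks a connection that has failed;
 * fallback_index caches the last known-good alternative connection;
 * cookie is bumped on every reconnect so a stale timeout cannot kill
 * the replacement socket.
 */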
struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_HAS_PID_FILE		3
#define NBD_HAS_CONFIG_REF		4
#define NBD_BOUND			5

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

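/*
 * A device carries two reference counts: "refs" pins the nbd_device
 * itself (dropped via nbd_put(), which removes it from the idr and
 * frees the gendisk), while "config_refs" pins the current nbd_config
 * (dropped via nbd_config_put(), which tears down the sockets, the pid
 * sysfs file and the debugfs state when it reaches zero).
 */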
struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;
};

struct nbd_cmd {
	struct nbd_device *nbd;
	int index;
	int cookie;
	struct completion send_complete;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	if (disk) {
		del_gendisk(disk);
		blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&nbd->tag_set);
		put_disk(disk);
	}
	kfree(nbd);
}

static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		mutex_unlock(&nbd_index_mutex);
		nbd_dev_remove(nbd);
	}
}

static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
}

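/*
 * Mark a connection as failed: shut the socket down, drop it from the
 * live connection count and clear any partially sent request.  If
 * @notify is set and a disconnect has not been requested, a
 * link_dead_args work item is queued so nbd_dead_link_work() can act
 * on the dead link asynchronously.
 */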
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		atomic_dec(&nbd->config->live_connections);
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}

static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}

static void nbd_size_update(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, config->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	nbd_size_update(nbd);
}

static void nbd_end_request(struct nbd_cmd *cmd)
{
	struct nbd_device *nbd = cmd->nbd;
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int error = req->errors ? -EIO : 0;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
		error ? "failed" : "done");

	blk_mq_complete_request(req, error);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

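/*
 * blk-mq timeout handler.  With more than one connection the request is
 * requeued so the submit path can pick a live socket (the originating
 * socket is only marked dead if the request's cookie still matches it);
 * with a single connection the whole device is shut down and the
 * request is completed with an error.
 */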
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		req->errors = -EIO;
		return BLK_EH_HANDLED;
	}

	/* If we are waiting on our dead timer then we could get timeout
	 * callbacks for our request.  For this we just want to reset the timer
	 * and let the queue side take care of everything.
	 */
	if (!completion_done(&cmd->send_complete)) {
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}
	config = nbd->config;

	if (config->num_connections > 1) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying\n");
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection.
		 */
		if (config->socks && config->num_connections > 1) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			blk_mq_requeue_request(req, true);
			nbd_config_put(nbd);
			return BLK_EH_NOT_HANDLED;
		}
	} else {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out\n");
	}
	set_bit(NBD_TIMEDOUT, &config->runtime_flags);
	req->errors = -EIO;
	sock_shutdown(nbd);
	nbd_config_put(nbd);

	return BLK_EH_HANDLED;
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 tag = blk_mq_unique_tag(req);
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);
			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	if (result <= 0) {
		if (result == -ERESTARTSYS) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			return BLK_MQ_RQ_QUEUE_BUSY;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			iov_iter_bvec(&from, ITER_BVEC | WRITE,
				      &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (result == -ERESTARTSYS) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					return BLK_MQ_RQ_QUEUE_BUSY;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}

/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;

	reply.magic = 0;
	iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors = -EIO;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, ITER_BVEC | READ,
				      &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected or we only have 1
				 * connection then we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config) ||
				    config->num_connections <= 1) {
					req->errors = -EIO;
					return cmd;
				}
				return ERR_PTR(-EIO);
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}

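/*
 * Per-connection receive worker: completes replies as they arrive and,
 * on a receive error, marks this connection dead before dropping its
 * recv_threads and config references.
 */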
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;
	int ret = 0;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			ret = PTR_ERR(cmd);
			break;
		}

		nbd_end_request(cmd);
	}
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	nbd_config_put(nbd);
	kfree(args);
}

static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	req->errors = -EIO;
	nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_stop_hw_queues(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_start_hw_queues(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

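/*
 * Pick another live connection to retry a request on.  Returns the
 * index of a usable socket, or a negative value if none is available
 * (e.g. after a disconnect request or when every socket is dead).
 */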
static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}

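/*
 * When no live connection could be found, optionally wait up to
 * dead_conn_timeout for one to be re-established (the conn_wait queue
 * is woken from nbd_reconnect_socket()).  Returns non-zero if a live
 * connection is available again.
 */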
static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return 0;
	wait_event_interruptible_timeout(config->conn_wait,
					 atomic_read(&config->live_connections),
					 config->dead_conn_timeout);
	return atomic_read(&config->live_connections);
}

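/*
 * Resolve a live socket for this request and hand it to nbd_send_cmd().
 * Dead sockets are replaced via find_fallback()/wait_for_reconnect(),
 * a still-pending partial send forces a requeue, and an -EAGAIN from
 * the send path marks the socket dead and retries on another one.
 */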
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		return -EINVAL;
	}
	req->errors = 0;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first.  We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	if (unlikely(nsock->pending && nsock->pending != req)) {
		blk_mq_requeue_request(req, true);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed trying another connection\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		mutex_unlock(&nsock->tx_lock);
		goto again;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}

static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bios to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send.  This keeps us from dereferencing
	 * freed data if we have particularly fast completions (i.e. we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);
	blk_mq_start_request(bd->rq);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail.  In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_MQ_RQ_QUEUE_ERROR;
	if (!ret)
		ret = BLK_MQ_RQ_QUEUE_OK;
	complete(&cmd->send_complete);

	return ret;
}

static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		sockfd_put(sock);
		return -EBUSY;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		sockfd_put(sock);
		return -ENOMEM;
	}
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);

	return 0;
}

static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		/* We take the tx_mutex in an error path in the recv_work, so we
		 * need to queue_work outside of the tx_mutex.
		 */
		queue_work(recv_workqueue, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}

/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	nbd->config = NULL;
	nbd->tag_set.timeout = 0;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_size(bdev, 0);
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
	}
}

static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
			      &config->runtime_flags))
		send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}

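/*
 * Drop a reference on the current configuration.  The final put tears
 * everything down: debugfs and the pid attribute go away, the queue is
 * drained, every socket is released, and the device-level reference
 * taken for this config is dropped along with the module reference.
 */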
static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd);
		if (test_and_clear_bit(NBD_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->task_recv = NULL;
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		nbd_reset(nbd);
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}

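/*
 * Bring the device up once all sockets have been added: size the
 * hardware queues to the number of connections, publish the pid
 * attribute, and kick off one receive worker per connection.  Multiple
 * connections are only allowed if the server advertised
 * NBD_FLAG_CAN_MULTI_CONN.
 */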
static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->task_recv = current;

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		return error;
	}
	set_bit(NBD_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->index = i;
		queue_work(recv_workqueue, &args->work);
	}
	return error;
}

static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	bd_set_size(bdev, config->bytesize);
	if (max_part)
		bdev->bd_invalidated = 1;
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
				       atomic_read(&config->recv_threads) == 0);
	if (ret)
		sock_shutdown(nbd);
	mutex_lock(&nbd->config_lock);
	bd_set_size(bdev, 0);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}

static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
{
	sock_shutdown(nbd);
	kill_bdev(bdev);
	nbd_bdev_reset(bdev);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

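/*
 * For orientation, a rough sketch of how a minimal userspace client
 * drives this ioctl interface (illustrative only; real clients such as
 * nbd-client also perform the NBD handshake on the socket first):
 *
 *	int dev = open("/dev/nbd0", O_RDWR);
 *	int sock = ...;                        /. already-connected TCP socket ./
 *	ioctl(dev, NBD_SET_BLKSIZE, 4096);
 *	ioctl(dev, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(dev, NBD_SET_SOCK, sock);
 *	ioctl(dev, NBD_DO_IT);                 /. blocks until disconnect/error ./
 *	ioctl(dev, NBD_CLEAR_SOCK);
 */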
Josef Bacik9561a7a2016-11-22 14:04:40 -05001123/* Must be called with config_lock held */
Wanlong Gaof4507162012-03-28 14:42:51 -07001124static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
Pavel Machek1a2ad212009-04-02 16:58:41 -07001125 unsigned int cmd, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001127 struct nbd_config *config = nbd->config;
1128
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129 switch (cmd) {
Josef Bacik9442b732017-02-07 17:10:22 -05001130 case NBD_DISCONNECT:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001131 return nbd_disconnect(nbd);
Markus Pargmann23272a672015-10-29 11:51:16 +01001132 case NBD_CLEAR_SOCK:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001133 nbd_clear_sock_ioctl(nbd, bdev);
1134 return 0;
Josef Bacik9442b732017-02-07 17:10:22 -05001135 case NBD_SET_SOCK:
Josef Bacike46c7282017-04-06 17:02:00 -04001136 return nbd_add_socket(nbd, arg, false);
Josef Bacik9442b732017-02-07 17:10:22 -05001137 case NBD_SET_BLKSIZE:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001138 nbd_size_set(nbd, arg,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001139 div_s64(config->bytesize, arg));
Josef Bacike5445412017-02-13 10:39:47 -05001140 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141 case NBD_SET_SIZE:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001142 nbd_size_set(nbd, config->blksize,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001143 div_s64(arg, config->blksize));
Josef Bacike5445412017-02-13 10:39:47 -05001144 return 0;
Markus Pargmann37091fd2015-07-27 07:36:49 +02001145 case NBD_SET_SIZE_BLOCKS:
Josef Bacik29eaadc2017-04-06 17:01:59 -04001146 nbd_size_set(nbd, config->blksize, arg);
Josef Bacike5445412017-02-13 10:39:47 -05001147 return 0;
Paul Clements7fdfd402007-10-16 23:27:37 -07001148 case NBD_SET_TIMEOUT:
Josef Bacikf8586852017-03-24 14:08:28 -04001149 if (arg) {
1150 nbd->tag_set.timeout = arg * HZ;
1151 blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
1152 }
Paul Clements7fdfd402007-10-16 23:27:37 -07001153 return 0;
Pavel Machek1a2ad212009-04-02 16:58:41 -07001154
Paul Clements2f012502012-10-04 17:16:15 -07001155 case NBD_SET_FLAGS:
Josef Bacik5ea8d102017-04-06 17:01:58 -04001156 config->flags = arg;
Paul Clements2f012502012-10-04 17:16:15 -07001157 return 0;
Josef Bacik9442b732017-02-07 17:10:22 -05001158 case NBD_DO_IT:
Josef Bacike46c7282017-04-06 17:02:00 -04001159 return nbd_start_device_ioctl(nbd, bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160 case NBD_CLEAR_QUE:
Herbert Xu4b2f0262006-01-06 00:09:47 -08001161 /*
1162 * This is for compatibility only. The queue is always cleared
1163 * by NBD_DO_IT or NBD_CLEAR_SOCK.
1164 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001165 return 0;
1166 case NBD_PRINT_DEBUG:
Josef Bacikfd8383f2016-09-08 12:33:37 -07001167 /*
1168 * For compatibility only, we no longer keep a list of
1169 * outstanding requests.
1170 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171 return 0;
1172 }
Pavel Machek1a2ad212009-04-02 16:58:41 -07001173 return -ENOTTY;
1174}
1175
1176static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
1177 unsigned int cmd, unsigned long arg)
1178{
Wanlong Gaof4507162012-03-28 14:42:51 -07001179 struct nbd_device *nbd = bdev->bd_disk->private_data;
Josef Bacike46c7282017-04-06 17:02:00 -04001180 struct nbd_config *config = nbd->config;
1181 int error = -EINVAL;
Pavel Machek1a2ad212009-04-02 16:58:41 -07001182
1183 if (!capable(CAP_SYS_ADMIN))
1184 return -EPERM;
1185
Josef Bacik9561a7a2016-11-22 14:04:40 -05001186 mutex_lock(&nbd->config_lock);
Josef Bacike46c7282017-04-06 17:02:00 -04001187
1188 /* Don't allow ioctl operations on a nbd device that was created with
1189 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
1190 */
1191 if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
1192 (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
1193 error = __nbd_ioctl(bdev, nbd, cmd, arg);
1194 else
1195 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
Josef Bacik9561a7a2016-11-22 14:04:40 -05001196 mutex_unlock(&nbd->config_lock);
Pavel Machek1a2ad212009-04-02 16:58:41 -07001197 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001198}
1199
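/*
 * Allocate a zeroed nbd_config and set its defaults (1024 byte block size,
 * no live connections yet). A module reference is taken here and released
 * when the last reference to the config is dropped.
 */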
Josef Bacik5ea8d102017-04-06 17:01:58 -04001200static struct nbd_config *nbd_alloc_config(void)
1201{
1202 struct nbd_config *config;
1203
1204 config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
1205 if (!config)
1206 return NULL;
1207 atomic_set(&config->recv_threads, 0);
1208 init_waitqueue_head(&config->recv_wq);
Josef Bacik560bc4b2017-04-06 17:02:04 -04001209 init_waitqueue_head(&config->conn_wait);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001210 config->blksize = 1024;
Josef Bacik560bc4b2017-04-06 17:02:04 -04001211 atomic_set(&config->live_connections, 0);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001212 try_module_get(THIS_MODULE);
1213 return config;
1214}
1215
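/*
 * open() on the block device: take a reference on the nbd_device and make
 * sure a config exists, allocating one under config_lock on first open.
 */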
1216static int nbd_open(struct block_device *bdev, fmode_t mode)
1217{
1218 struct nbd_device *nbd;
1219 int ret = 0;
1220
1221 mutex_lock(&nbd_index_mutex);
1222 nbd = bdev->bd_disk->private_data;
1223 if (!nbd) {
1224 ret = -ENXIO;
1225 goto out;
1226 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001227 if (!refcount_inc_not_zero(&nbd->refs)) {
1228 ret = -ENXIO;
1229 goto out;
1230 }
Josef Bacik5ea8d102017-04-06 17:01:58 -04001231 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1232 struct nbd_config *config;
1233
1234 mutex_lock(&nbd->config_lock);
1235 if (refcount_inc_not_zero(&nbd->config_refs)) {
1236 mutex_unlock(&nbd->config_lock);
1237 goto out;
1238 }
1239 config = nbd->config = nbd_alloc_config();
1240 if (!config) {
1241 ret = -ENOMEM;
1242 mutex_unlock(&nbd->config_lock);
1243 goto out;
1244 }
1245 refcount_set(&nbd->config_refs, 1);
Josef Bacikc6a47592017-04-06 17:02:06 -04001246 refcount_inc(&nbd->refs);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001247 mutex_unlock(&nbd->config_lock);
1248 }
1249out:
1250 mutex_unlock(&nbd_index_mutex);
1251 return ret;
1252}
1253
1254static void nbd_release(struct gendisk *disk, fmode_t mode)
1255{
1256 struct nbd_device *nbd = disk->private_data;
1257 nbd_config_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04001258 nbd_put(nbd);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001259}
1260
Alexey Dobriyan83d5cde2009-09-21 17:01:13 -07001261static const struct block_device_operations nbd_fops =
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262{
1263 .owner = THIS_MODULE,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001264 .open = nbd_open,
1265 .release = nbd_release,
Arnd Bergmann8a6cfeb2010-07-08 10:18:46 +02001266 .ioctl = nbd_ioctl,
Al Viro263a3df2016-01-07 10:04:37 -05001267 .compat_ioctl = nbd_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268};
1269
Markus Pargmann30d53d92015-08-17 08:20:06 +02001270#if IS_ENABLED(CONFIG_DEBUG_FS)
1271
1272static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
1273{
1274 struct nbd_device *nbd = s->private;
1275
1276 if (nbd->task_recv)
1277 seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
Markus Pargmann30d53d92015-08-17 08:20:06 +02001278
1279 return 0;
1280}
1281
1282static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
1283{
1284 return single_open(file, nbd_dbg_tasks_show, inode->i_private);
1285}
1286
1287static const struct file_operations nbd_dbg_tasks_ops = {
1288 .open = nbd_dbg_tasks_open,
1289 .read = seq_read,
1290 .llseek = seq_lseek,
1291 .release = single_release,
1292};
1293
1294static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1295{
1296 struct nbd_device *nbd = s->private;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001297 u32 flags = nbd->config->flags;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001298
1299 seq_printf(s, "Hex: 0x%08x\n\n", flags);
1300
1301 seq_puts(s, "Known flags:\n");
1302
1303 if (flags & NBD_FLAG_HAS_FLAGS)
1304 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
1305 if (flags & NBD_FLAG_READ_ONLY)
1306 seq_puts(s, "NBD_FLAG_READ_ONLY\n");
1307 if (flags & NBD_FLAG_SEND_FLUSH)
1308 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
1309 if (flags & NBD_FLAG_SEND_TRIM)
1310 seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
1311
1312 return 0;
1313}
1314
1315static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
1316{
1317 return single_open(file, nbd_dbg_flags_show, inode->i_private);
1318}
1319
1320static const struct file_operations nbd_dbg_flags_ops = {
1321 .open = nbd_dbg_flags_open,
1322 .read = seq_read,
1323 .llseek = seq_lseek,
1324 .release = single_release,
1325};
1326
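/*
 * Create the per-device debugfs directory and its read-only entries
 * (tasks, size_bytes, timeout, blocksize, flags).
 */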
1327static int nbd_dev_dbg_init(struct nbd_device *nbd)
1328{
1329 struct dentry *dir;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001330 struct nbd_config *config = nbd->config;
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001331
1332 if (!nbd_dbg_dir)
1333 return -EIO;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001334
1335 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001336 if (!dir) {
1337 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1338 nbd_name(nbd));
1339 return -EIO;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001340 }
Josef Bacik5ea8d102017-04-06 17:01:58 -04001341 config->dbg_dir = dir;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001342
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001343 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001344 debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
Josef Bacik0eadf372016-09-08 12:33:40 -07001345 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001346 debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
Josef Bacikd366a0f2016-06-08 10:32:10 -04001347 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
Markus Pargmann30d53d92015-08-17 08:20:06 +02001348
1349 return 0;
1350}
1351
1352static void nbd_dev_dbg_close(struct nbd_device *nbd)
1353{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001354 debugfs_remove_recursive(nbd->config->dbg_dir);
Markus Pargmann30d53d92015-08-17 08:20:06 +02001355}
1356
1357static int nbd_dbg_init(void)
1358{
1359 struct dentry *dbg_dir;
1360
1361 dbg_dir = debugfs_create_dir("nbd", NULL);
Markus Pargmann27ea43f2015-10-24 21:15:34 +02001362 if (!dbg_dir)
1363 return -EIO;
Markus Pargmann30d53d92015-08-17 08:20:06 +02001364
1365 nbd_dbg_dir = dbg_dir;
1366
1367 return 0;
1368}
1369
1370static void nbd_dbg_close(void)
1371{
1372 debugfs_remove_recursive(nbd_dbg_dir);
1373}
1374
1375#else /* IS_ENABLED(CONFIG_DEBUG_FS) */
1376
1377static int nbd_dev_dbg_init(struct nbd_device *nbd)
1378{
1379 return 0;
1380}
1381
1382static void nbd_dev_dbg_close(struct nbd_device *nbd)
1383{
1384}
1385
1386static int nbd_dbg_init(void)
1387{
1388 return 0;
1389}
1390
1391static void nbd_dbg_close(void)
1392{
1393}
1394
1395#endif
1396
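/*
 * blk-mq request constructor: stash the owning nbd_device in the
 * per-request command payload so the submission path can find it.
 */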
Josef Bacikfd8383f2016-09-08 12:33:37 -07001397static int nbd_init_request(void *data, struct request *rq,
1398 unsigned int hctx_idx, unsigned int request_idx,
1399 unsigned int numa_node)
1400{
1401 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
Josef Bacikfd8383f2016-09-08 12:33:37 -07001402 cmd->nbd = data;
Josef Bacikfd8383f2016-09-08 12:33:37 -07001403 return 0;
1404}
1405
Eric Biggersf363b082017-03-30 13:39:16 -07001406static const struct blk_mq_ops nbd_mq_ops = {
Josef Bacikfd8383f2016-09-08 12:33:37 -07001407 .queue_rq = nbd_queue_rq,
Josef Bacikfd8383f2016-09-08 12:33:37 -07001408 .init_request = nbd_init_request,
Josef Bacik0eadf372016-09-08 12:33:40 -07001409 .timeout = nbd_xmit_timeout,
Josef Bacikfd8383f2016-09-08 12:33:37 -07001410};
1411
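/*
 * Allocate and register a new /dev/nbd<index> device: gendisk, blk-mq tag
 * set and queue, plus an entry in nbd_index_idr. index < 0 means "pick the
 * first free slot". Returns the index on success or a negative errno.
 */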
Josef Bacikb0d91112017-02-01 16:11:40 -05001412static int nbd_dev_add(int index)
1413{
1414 struct nbd_device *nbd;
1415 struct gendisk *disk;
1416 struct request_queue *q;
1417 int err = -ENOMEM;
1418
1419 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1420 if (!nbd)
1421 goto out;
1422
1423 disk = alloc_disk(1 << part_shift);
1424 if (!disk)
1425 goto out_free_nbd;
1426
1427 if (index >= 0) {
1428 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1429 GFP_KERNEL);
1430 if (err == -ENOSPC)
1431 err = -EEXIST;
1432 } else {
1433 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
1434 if (err >= 0)
1435 index = err;
1436 }
1437 if (err < 0)
1438 goto out_free_disk;
1439
Josef Bacike46c7282017-04-06 17:02:00 -04001440 nbd->index = index;
Josef Bacikb0d91112017-02-01 16:11:40 -05001441 nbd->disk = disk;
1442 nbd->tag_set.ops = &nbd_mq_ops;
1443 nbd->tag_set.nr_hw_queues = 1;
1444 nbd->tag_set.queue_depth = 128;
1445 nbd->tag_set.numa_node = NUMA_NO_NODE;
1446 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
1447 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
1448 BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
1449 nbd->tag_set.driver_data = nbd;
1450
1451 err = blk_mq_alloc_tag_set(&nbd->tag_set);
1452 if (err)
1453 goto out_free_idr;
1454
1455 q = blk_mq_init_queue(&nbd->tag_set);
1456 if (IS_ERR(q)) {
1457 err = PTR_ERR(q);
1458 goto out_free_tags;
1459 }
1460 disk->queue = q;
1461
1462 /*
1463 * Tell the block layer that we are not a rotational device
1464 */
1465 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
1466 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
1467 disk->queue->limits.discard_granularity = 512;
1468 blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
Josef Bacikb0d91112017-02-01 16:11:40 -05001469 blk_queue_max_hw_sectors(disk->queue, 65536);
1470 disk->queue->limits.max_sectors = 256;
1471
Josef Bacikb0d91112017-02-01 16:11:40 -05001472 mutex_init(&nbd->config_lock);
Josef Bacik5ea8d102017-04-06 17:01:58 -04001473 refcount_set(&nbd->config_refs, 0);
Josef Bacikc6a47592017-04-06 17:02:06 -04001474 refcount_set(&nbd->refs, 1);
1475 INIT_LIST_HEAD(&nbd->list);
Josef Bacikb0d91112017-02-01 16:11:40 -05001476 disk->major = NBD_MAJOR;
1477 disk->first_minor = index << part_shift;
1478 disk->fops = &nbd_fops;
1479 disk->private_data = nbd;
1480 sprintf(disk->disk_name, "nbd%d", index);
Josef Bacikb0d91112017-02-01 16:11:40 -05001481 nbd_reset(nbd);
1482 add_disk(disk);
Josef Bacik47d902b2017-04-06 17:02:05 -04001483 nbd_total_devices++;
Josef Bacikb0d91112017-02-01 16:11:40 -05001484 return index;
1485
1486out_free_tags:
1487 blk_mq_free_tag_set(&nbd->tag_set);
1488out_free_idr:
1489 idr_remove(&nbd_index_idr, index);
1490out_free_disk:
1491 put_disk(disk);
1492out_free_nbd:
1493 kfree(nbd);
1494out:
1495 return err;
1496}
1497
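/*
 * idr_for_each() callback: report the first device that currently has no
 * active configuration, so it can be reused for a new connection.
 */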
Josef Bacike46c7282017-04-06 17:02:00 -04001498static int find_free_cb(int id, void *ptr, void *data)
1499{
1500 struct nbd_device *nbd = ptr;
1501 struct nbd_device **found = data;
1502
1503 if (!refcount_read(&nbd->config_refs)) {
1504 *found = nbd;
1505 return 1;
1506 }
1507 return 0;
1508}
1509
1510/* Netlink interface. */
1511static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
1512 [NBD_ATTR_INDEX] = { .type = NLA_U32 },
1513 [NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
1514 [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
1515 [NBD_ATTR_TIMEOUT] = { .type = NLA_U64 },
1516 [NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 },
1517 [NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 },
1518 [NBD_ATTR_SOCKETS] = { .type = NLA_NESTED},
Josef Bacik560bc4b2017-04-06 17:02:04 -04001519 [NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 },
Josef Bacik47d902b2017-04-06 17:02:05 -04001520 [NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
Josef Bacike46c7282017-04-06 17:02:00 -04001521};
1522
1523static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
1524 [NBD_SOCK_FD] = { .type = NLA_U32 },
1525};
1526
Josef Bacik47d902b2017-04-06 17:02:05 -04001527/* We don't use this right now since we don't parse the incoming list, but we
1528 * still want it here so userspace knows what to expect.
1529 */
1530static struct nla_policy __attribute__((unused))
1531nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
1532 [NBD_DEVICE_INDEX] = { .type = NLA_U32 },
1533 [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
1534};
1535
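/*
 * NBD_CMD_CONNECT: look up (or create) the requested device, build a fresh
 * config from the netlink attributes, add each NBD_SOCK_FD found in the
 * NBD_ATTR_SOCKETS nest and start the device. On success the device index
 * is sent back to the caller via nbd_connect_reply().
 */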
Josef Bacike46c7282017-04-06 17:02:00 -04001536static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
1537{
1538 struct nbd_device *nbd = NULL;
1539 struct nbd_config *config;
1540 int index = -1;
1541 int ret;
1542
1543 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1544 return -EPERM;
1545
1546 if (info->attrs[NBD_ATTR_INDEX])
1547 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1548 if (!info->attrs[NBD_ATTR_SOCKETS]) {
1549 printk(KERN_ERR "nbd: must specify at least one socket\n");
1550 return -EINVAL;
1551 }
1552 if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
1553 printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
1554 return -EINVAL;
1555 }
1556again:
1557 mutex_lock(&nbd_index_mutex);
1558 if (index == -1) {
1559 ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
1560 if (ret == 0) {
1561 int new_index;
1562 new_index = nbd_dev_add(-1);
1563 if (new_index < 0) {
1564 mutex_unlock(&nbd_index_mutex);
1565 printk(KERN_ERR "nbd: failed to add new device\n");
 1566				return new_index;
1567 }
1568 nbd = idr_find(&nbd_index_idr, new_index);
1569 }
1570 } else {
1571 nbd = idr_find(&nbd_index_idr, index);
1572 }
Josef Bacike46c7282017-04-06 17:02:00 -04001573 if (!nbd) {
1574 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1575 index);
Josef Bacikc6a47592017-04-06 17:02:06 -04001576 mutex_unlock(&nbd_index_mutex);
Josef Bacike46c7282017-04-06 17:02:00 -04001577 return -EINVAL;
1578 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001579 if (!refcount_inc_not_zero(&nbd->refs)) {
1580 mutex_unlock(&nbd_index_mutex);
1581 if (index == -1)
1582 goto again;
1583 printk(KERN_ERR "nbd: device at index %d is going down\n",
1584 index);
1585 return -EINVAL;
1586 }
1587 mutex_unlock(&nbd_index_mutex);
Josef Bacike46c7282017-04-06 17:02:00 -04001588
1589 mutex_lock(&nbd->config_lock);
1590 if (refcount_read(&nbd->config_refs)) {
1591 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001592 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001593 if (index == -1)
1594 goto again;
1595 printk(KERN_ERR "nbd: nbd%d already in use\n", index);
1596 return -EBUSY;
1597 }
1598 if (WARN_ON(nbd->config)) {
1599 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001600 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001601 return -EINVAL;
1602 }
1603 config = nbd->config = nbd_alloc_config();
1604 if (!nbd->config) {
1605 mutex_unlock(&nbd->config_lock);
Josef Bacikc6a47592017-04-06 17:02:06 -04001606 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001607 printk(KERN_ERR "nbd: couldn't allocate config\n");
1608 return -ENOMEM;
1609 }
1610 refcount_set(&nbd->config_refs, 1);
1611 set_bit(NBD_BOUND, &config->runtime_flags);
1612
1613 if (info->attrs[NBD_ATTR_SIZE_BYTES]) {
1614 u64 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
1615 nbd_size_set(nbd, config->blksize,
1616 div64_u64(bytes, config->blksize));
1617 }
1618 if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
1619 u64 bsize =
1620 nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
1621 nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
1622 }
1623 if (info->attrs[NBD_ATTR_TIMEOUT]) {
1624 u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
1625 nbd->tag_set.timeout = timeout * HZ;
1626 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1627 }
Josef Bacik560bc4b2017-04-06 17:02:04 -04001628 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
1629 config->dead_conn_timeout =
1630 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
1631 config->dead_conn_timeout *= HZ;
1632 }
Josef Bacike46c7282017-04-06 17:02:00 -04001633 if (info->attrs[NBD_ATTR_SERVER_FLAGS])
1634 config->flags =
1635 nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
1636 if (info->attrs[NBD_ATTR_SOCKETS]) {
1637 struct nlattr *attr;
1638 int rem, fd;
1639
1640 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
1641 rem) {
1642 struct nlattr *socks[NBD_SOCK_MAX+1];
1643
1644 if (nla_type(attr) != NBD_SOCK_ITEM) {
1645 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
1646 ret = -EINVAL;
1647 goto out;
1648 }
1649 ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
1650 nbd_sock_policy);
1651 if (ret != 0) {
1652 printk(KERN_ERR "nbd: error processing sock list\n");
1653 ret = -EINVAL;
1654 goto out;
1655 }
1656 if (!socks[NBD_SOCK_FD])
1657 continue;
1658 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
1659 ret = nbd_add_socket(nbd, fd, true);
1660 if (ret)
1661 goto out;
1662 }
1663 }
1664 ret = nbd_start_device(nbd);
1665out:
1666 mutex_unlock(&nbd->config_lock);
1667 if (!ret) {
1668 set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags);
1669 refcount_inc(&nbd->config_refs);
1670 nbd_connect_reply(info, nbd->index);
1671 }
1672 nbd_config_put(nbd);
1673 return ret;
1674}
1675
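/*
 * NBD_CMD_DISCONNECT: ask the server to disconnect the named device and
 * drop the long-lived config reference taken at connect time.
 */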
1676static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
1677{
1678 struct nbd_device *nbd;
1679 int index;
1680
1681 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1682 return -EPERM;
1683
1684 if (!info->attrs[NBD_ATTR_INDEX]) {
1685 printk(KERN_ERR "nbd: must specify an index to disconnect\n");
1686 return -EINVAL;
1687 }
1688 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1689 mutex_lock(&nbd_index_mutex);
1690 nbd = idr_find(&nbd_index_idr, index);
Josef Bacike46c7282017-04-06 17:02:00 -04001691 if (!nbd) {
Josef Bacikc6a47592017-04-06 17:02:06 -04001692 mutex_unlock(&nbd_index_mutex);
Josef Bacike46c7282017-04-06 17:02:00 -04001693 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1694 index);
1695 return -EINVAL;
1696 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001697 if (!refcount_inc_not_zero(&nbd->refs)) {
1698 mutex_unlock(&nbd_index_mutex);
1699 printk(KERN_ERR "nbd: device at index %d is going down\n",
1700 index);
1701 return -EINVAL;
1702 }
1703 mutex_unlock(&nbd_index_mutex);
1704 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1705 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001706 return 0;
Josef Bacikc6a47592017-04-06 17:02:06 -04001707 }
Josef Bacike46c7282017-04-06 17:02:00 -04001708 mutex_lock(&nbd->config_lock);
1709 nbd_disconnect(nbd);
1710 mutex_unlock(&nbd->config_lock);
1711 if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
1712 &nbd->config->runtime_flags))
1713 nbd_config_put(nbd);
1714 nbd_config_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04001715 nbd_put(nbd);
Josef Bacike46c7282017-04-06 17:02:00 -04001716 return 0;
1717}
1718
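/*
 * NBD_CMD_RECONFIGURE: update timeouts and/or feed replacement sockets to a
 * device that is already running, typically to re-establish dead links.
 */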
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001719static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
1720{
1721 struct nbd_device *nbd = NULL;
1722 struct nbd_config *config;
1723 int index;
1724 int ret = -EINVAL;
1725
1726 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1727 return -EPERM;
1728
1729 if (!info->attrs[NBD_ATTR_INDEX]) {
1730 printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
1731 return -EINVAL;
1732 }
1733 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1734 mutex_lock(&nbd_index_mutex);
1735 nbd = idr_find(&nbd_index_idr, index);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001736 if (!nbd) {
Josef Bacikc6a47592017-04-06 17:02:06 -04001737 mutex_unlock(&nbd_index_mutex);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001738 printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
1739 index);
1740 return -EINVAL;
1741 }
Josef Bacikc6a47592017-04-06 17:02:06 -04001742 if (!refcount_inc_not_zero(&nbd->refs)) {
1743 mutex_unlock(&nbd_index_mutex);
1744 printk(KERN_ERR "nbd: device at index %d is going down\n",
1745 index);
1746 return -EINVAL;
1747 }
1748 mutex_unlock(&nbd_index_mutex);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001749
1750 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1751 dev_err(nbd_to_dev(nbd),
1752 "not configured, cannot reconfigure\n");
Josef Bacikc6a47592017-04-06 17:02:06 -04001753 nbd_put(nbd);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001754 return -EINVAL;
1755 }
1756
1757 mutex_lock(&nbd->config_lock);
1758 config = nbd->config;
1759 if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
1760 !nbd->task_recv) {
1761 dev_err(nbd_to_dev(nbd),
1762 "not configured, cannot reconfigure\n");
1763 goto out;
1764 }
1765
1766 if (info->attrs[NBD_ATTR_TIMEOUT]) {
1767 u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
1768 nbd->tag_set.timeout = timeout * HZ;
1769 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1770 }
Josef Bacik560bc4b2017-04-06 17:02:04 -04001771 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
1772 config->dead_conn_timeout =
1773 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
1774 config->dead_conn_timeout *= HZ;
1775 }
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001776
1777 if (info->attrs[NBD_ATTR_SOCKETS]) {
1778 struct nlattr *attr;
1779 int rem, fd;
1780
1781 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
1782 rem) {
1783 struct nlattr *socks[NBD_SOCK_MAX+1];
1784
1785 if (nla_type(attr) != NBD_SOCK_ITEM) {
1786 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
1787 ret = -EINVAL;
1788 goto out;
1789 }
1790 ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
1791 nbd_sock_policy);
1792 if (ret != 0) {
1793 printk(KERN_ERR "nbd: error processing sock list\n");
1794 ret = -EINVAL;
1795 goto out;
1796 }
1797 if (!socks[NBD_SOCK_FD])
1798 continue;
1799 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
1800 ret = nbd_reconnect_socket(nbd, fd);
1801 if (ret) {
1802 if (ret == -ENOSPC)
1803 ret = 0;
1804 goto out;
1805 }
1806 dev_info(nbd_to_dev(nbd), "reconnected socket\n");
1807 }
1808 }
1809out:
1810 mutex_unlock(&nbd->config_lock);
1811 nbd_config_put(nbd);
Josef Bacikc6a47592017-04-06 17:02:06 -04001812 nbd_put(nbd);
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001813 return ret;
1814}
1815
Josef Bacike46c7282017-04-06 17:02:00 -04001816static const struct genl_ops nbd_connect_genl_ops[] = {
1817 {
1818 .cmd = NBD_CMD_CONNECT,
1819 .policy = nbd_attr_policy,
1820 .doit = nbd_genl_connect,
1821 },
1822 {
1823 .cmd = NBD_CMD_DISCONNECT,
1824 .policy = nbd_attr_policy,
1825 .doit = nbd_genl_disconnect,
1826 },
Josef Bacikb7aa3d32017-04-06 17:02:01 -04001827 {
1828 .cmd = NBD_CMD_RECONFIGURE,
1829 .policy = nbd_attr_policy,
1830 .doit = nbd_genl_reconfigure,
1831 },
Josef Bacik47d902b2017-04-06 17:02:05 -04001832 {
1833 .cmd = NBD_CMD_STATUS,
1834 .policy = nbd_attr_policy,
1835 .doit = nbd_genl_status,
1836 },
Josef Bacike46c7282017-04-06 17:02:00 -04001837};
1838
Josef Bacik799f9a32017-04-06 17:02:02 -04001839static const struct genl_multicast_group nbd_mcast_grps[] = {
1840 { .name = NBD_GENL_MCAST_GROUP_NAME, },
1841};
1842
Josef Bacike46c7282017-04-06 17:02:00 -04001843static struct genl_family nbd_genl_family __ro_after_init = {
1844 .hdrsize = 0,
1845 .name = NBD_GENL_FAMILY_NAME,
1846 .version = NBD_GENL_VERSION,
1847 .module = THIS_MODULE,
1848 .ops = nbd_connect_genl_ops,
1849 .n_ops = ARRAY_SIZE(nbd_connect_genl_ops),
1850 .maxattr = NBD_ATTR_MAX,
Josef Bacik799f9a32017-04-06 17:02:02 -04001851 .mcgrps = nbd_mcast_grps,
1852 .n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
Josef Bacike46c7282017-04-06 17:02:00 -04001853};
1854
Josef Bacik47d902b2017-04-06 17:02:05 -04001855static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
1856{
1857 struct nlattr *dev_opt;
1858 u8 connected = 0;
1859 int ret;
1860
 1861	/* This is a little racy, but for status it's ok. The
 1862	 * reason we don't take a ref here is that we can't
1863 * take a ref in the index == -1 case as we would need
1864 * to put under the nbd_index_mutex, which could
1865 * deadlock if we are configured to remove ourselves
1866 * once we're disconnected.
1867 */
1868 if (refcount_read(&nbd->config_refs))
1869 connected = 1;
1870 dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM);
1871 if (!dev_opt)
1872 return -EMSGSIZE;
1873 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
1874 if (ret)
1875 return -EMSGSIZE;
1876 ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
1877 connected);
1878 if (ret)
1879 return -EMSGSIZE;
1880 nla_nest_end(reply, dev_opt);
1881 return 0;
1882}
1883
1884static int status_cb(int id, void *ptr, void *data)
1885{
1886 struct nbd_device *nbd = ptr;
1887 return populate_nbd_status(nbd, (struct sk_buff *)data);
1888}
1889
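/*
 * NBD_CMD_STATUS: reply with an NBD_ATTR_DEVICE_LIST nest describing either
 * a single device (if NBD_ATTR_INDEX was given) or every registered device.
 */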
1890static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
1891{
1892 struct nlattr *dev_list;
1893 struct sk_buff *reply;
1894 void *reply_head;
1895 size_t msg_size;
1896 int index = -1;
1897 int ret = -ENOMEM;
1898
1899 if (info->attrs[NBD_ATTR_INDEX])
1900 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1901
1902 mutex_lock(&nbd_index_mutex);
1903
1904 msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
1905 nla_attr_size(sizeof(u8)));
1906 msg_size *= (index == -1) ? nbd_total_devices : 1;
1907
1908 reply = genlmsg_new(msg_size, GFP_KERNEL);
1909 if (!reply)
1910 goto out;
1911 reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
1912 NBD_CMD_STATUS);
1913 if (!reply_head) {
1914 nlmsg_free(reply);
1915 goto out;
1916 }
1917
1918 dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST);
1919 if (index == -1) {
1920 ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
1921 if (ret) {
1922 nlmsg_free(reply);
1923 goto out;
1924 }
1925 } else {
1926 struct nbd_device *nbd;
1927 nbd = idr_find(&nbd_index_idr, index);
1928 if (nbd) {
1929 ret = populate_nbd_status(nbd, reply);
1930 if (ret) {
1931 nlmsg_free(reply);
1932 goto out;
1933 }
1934 }
1935 }
1936 nla_nest_end(reply, dev_list);
1937 genlmsg_end(reply, reply_head);
1938 genlmsg_reply(reply, info);
1939 ret = 0;
1940out:
1941 mutex_unlock(&nbd_index_mutex);
1942 return ret;
1943}
1944
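/*
 * Send the newly configured device index back to the NBD_CMD_CONNECT caller.
 */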
Josef Bacike46c7282017-04-06 17:02:00 -04001945static void nbd_connect_reply(struct genl_info *info, int index)
1946{
1947 struct sk_buff *skb;
1948 void *msg_head;
1949 int ret;
1950
1951 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
1952 if (!skb)
1953 return;
1954 msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
1955 NBD_CMD_CONNECT);
1956 if (!msg_head) {
1957 nlmsg_free(skb);
1958 return;
1959 }
1960 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
1961 if (ret) {
1962 nlmsg_free(skb);
1963 return;
1964 }
1965 genlmsg_end(skb, msg_head);
1966 genlmsg_reply(skb, info);
1967}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968
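/*
 * Multicast an NBD_CMD_LINK_DEAD notification carrying the device index so
 * listeners on the nbd multicast group can react (e.g. by reconnecting).
 */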
Josef Bacik799f9a32017-04-06 17:02:02 -04001969static void nbd_mcast_index(int index)
1970{
1971 struct sk_buff *skb;
1972 void *msg_head;
1973 int ret;
1974
1975 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
1976 if (!skb)
1977 return;
1978 msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
1979 NBD_CMD_LINK_DEAD);
1980 if (!msg_head) {
1981 nlmsg_free(skb);
1982 return;
1983 }
1984 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
1985 if (ret) {
1986 nlmsg_free(skb);
1987 return;
1988 }
1989 genlmsg_end(skb, msg_head);
1990 genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
1991}
1992
1993static void nbd_dead_link_work(struct work_struct *work)
1994{
1995 struct link_dead_args *args = container_of(work, struct link_dead_args,
1996 work);
1997 nbd_mcast_index(args->index);
1998 kfree(args);
1999}
2000
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001static int __init nbd_init(void)
2002{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 int i;
2004
Adrian Bunk5b7b18c2006-03-25 03:07:04 -08002005 BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002007 if (max_part < 0) {
WANG Cong7742ce42011-08-19 14:48:28 +02002008 printk(KERN_ERR "nbd: max_part must be >= 0\n");
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002009 return -EINVAL;
2010 }
2011
2012 part_shift = 0;
Namhyung Kim5988ce22011-05-28 14:44:46 +02002013 if (max_part > 0) {
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002014 part_shift = fls(max_part);
2015
Namhyung Kim5988ce22011-05-28 14:44:46 +02002016 /*
2017 * Adjust max_part according to part_shift as it is exported
 2018		 * to user space so that users can know the max number of
 2019		 * partitions the kernel should be able to manage.
2020 *
2021 * Note that -1 is required because partition 0 is reserved
2022 * for the whole disk.
2023 */
2024 max_part = (1UL << part_shift) - 1;
2025 }
2026
Namhyung Kim3b271082011-05-28 14:44:46 +02002027 if ((1UL << part_shift) > DISK_MAX_PARTS)
2028 return -EINVAL;
2029
2030 if (nbds_max > 1UL << (MINORBITS - part_shift))
2031 return -EINVAL;
Josef Bacik124d6db2017-02-01 16:11:11 -05002032 recv_workqueue = alloc_workqueue("knbd-recv",
2033 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2034 if (!recv_workqueue)
2035 return -ENOMEM;
Namhyung Kim3b271082011-05-28 14:44:46 +02002036
Josef Bacik6330a2d2017-02-15 16:49:48 -05002037 if (register_blkdev(NBD_MAJOR, "nbd")) {
2038 destroy_workqueue(recv_workqueue);
Josef Bacikb0d91112017-02-01 16:11:40 -05002039 return -EIO;
Josef Bacik6330a2d2017-02-15 16:49:48 -05002040 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041
Josef Bacike46c7282017-04-06 17:02:00 -04002042 if (genl_register_family(&nbd_genl_family)) {
2043 unregister_blkdev(NBD_MAJOR, "nbd");
2044 destroy_workqueue(recv_workqueue);
2045 return -EINVAL;
2046 }
Markus Pargmann30d53d92015-08-17 08:20:06 +02002047 nbd_dbg_init();
2048
Josef Bacikb0d91112017-02-01 16:11:40 -05002049 mutex_lock(&nbd_index_mutex);
2050 for (i = 0; i < nbds_max; i++)
2051 nbd_dev_add(i);
2052 mutex_unlock(&nbd_index_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 return 0;
Josef Bacikb0d91112017-02-01 16:11:40 -05002054}
2055
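/*
 * Module unload helper: collect every registered device on a private list
 * (taking an extra reference) so nbd_cleanup() can drop them outside the
 * idr walk.
 */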
2056static int nbd_exit_cb(int id, void *ptr, void *data)
2057{
Josef Bacikc6a47592017-04-06 17:02:06 -04002058 struct list_head *list = (struct list_head *)data;
Josef Bacikb0d91112017-02-01 16:11:40 -05002059 struct nbd_device *nbd = ptr;
Josef Bacikc6a47592017-04-06 17:02:06 -04002060
2061 refcount_inc(&nbd->refs);
2062 list_add_tail(&nbd->list, list);
Josef Bacikb0d91112017-02-01 16:11:40 -05002063 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064}
2065
2066static void __exit nbd_cleanup(void)
2067{
Josef Bacikc6a47592017-04-06 17:02:06 -04002068 struct nbd_device *nbd;
2069 LIST_HEAD(del_list);
2070
Markus Pargmann30d53d92015-08-17 08:20:06 +02002071 nbd_dbg_close();
2072
Josef Bacikc6a47592017-04-06 17:02:06 -04002073 mutex_lock(&nbd_index_mutex);
2074 idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
2075 mutex_unlock(&nbd_index_mutex);
2076
2077 list_for_each_entry(nbd, &del_list, list) {
2078 if (refcount_read(&nbd->refs) != 2)
2079 printk(KERN_ERR "nbd: possibly leaking a device\n");
2080 nbd_put(nbd);
2081 nbd_put(nbd);
2082 }
2083
Josef Bacikb0d91112017-02-01 16:11:40 -05002084 idr_destroy(&nbd_index_idr);
Josef Bacike46c7282017-04-06 17:02:00 -04002085 genl_unregister_family(&nbd_genl_family);
Josef Bacik124d6db2017-02-01 16:11:11 -05002086 destroy_workqueue(recv_workqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 unregister_blkdev(NBD_MAJOR, "nbd");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088}
2089
2090module_init(nbd_init);
2091module_exit(nbd_cleanup);
2092
2093MODULE_DESCRIPTION("Network Block Device");
2094MODULE_LICENSE("GPL");
2095
Lars Marowsky-Bree40be0c22005-05-01 08:59:07 -07002096module_param(nbds_max, int, 0444);
Laurent Vivierd71a6d72008-04-29 01:02:51 -07002097MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
2098module_param(max_part, int, 0444);
2099MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
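
/*
 * Illustrative module load (values are examples only): create four devices
 * with support for up to eight partitions each:
 *
 *	modprobe nbd nbds_max=4 max_part=8
 */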