
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
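
/*
 * Illustrative conversions (a sketch, not part of the driver):
 *
 *	u64 sectors = nbytes >> SECTOR_SHIFT;		4096 bytes -> 8 sectors
 *	u64 byte_count = sectors << SECTOR_SHIFT;	8 sectors -> 4096 bytes
 *
 * bio_clone_range() below uses the same shift to derive bi_sector
 * from a byte offset.
 */
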
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
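
/*
 * Sketch of how the mask above can be used (illustrative only; the
 * variable name and error code here are assumptions, not lifted
 * from this file):
 *
 *	u64 unsupported = features & ~RBD_FEATURES_SUPPORTED;
 *
 *	if (unsupported)
 *		return -ENXIO;		refuse to map the image
 */
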
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
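
/*
 * Worked example for the bound above (not from the original source):
 * a bit carries log10(2) ~= 0.30103 decimal digits, and the macro
 * budgets (5/2) / 8 = 0.3125 digits per bit, a safe over-estimate.
 * With 4-byte ints that gives (5 * 4) / 2 + 1 = 11 characters --
 * exactly enough for "-2147483648" (ten digits plus a sign).
 */
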
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These fields never change for a given rbd image */
	char *object_prefix;
	u64 features;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;
	u64 *snap_sizes;

	u64 stripe_unit;
	u64 stripe_count;
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64 pool_id;
	const char *pool_name;

	const char *image_id;
	const char *image_name;

	u64 snap_id;
	const char *snap_name;

	struct kref kref;
};

/*
 * An instance of the client; multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client *client;
	struct kref kref;
	struct list_head node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char *object_name;
	u64 offset;		/* object start byte */
	u64 length;		/* bytes from offset */
	unsigned long flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request *obj_request;	/* STAT op */
		struct {
			struct rbd_img_request *img_request;
			u64 img_offset;
			/* links for img_request->obj_requests list */
			struct list_head links;
		};
	};
	u32 which;		/* posn in image request list */

	enum obj_request_type type;
	union {
		struct bio *bio_list;
		struct {
			struct page **pages;
			u32 page_count;
		};
	};
	struct page **copyup_pages;

	struct ceph_osd_request *osd_req;

	u64 xferred;		/* bytes transferred */
	int result;

	rbd_obj_callback_t callback;
	struct completion completion;

	struct kref kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
	struct rbd_device *rbd_dev;
	u64 offset;	/* starting image byte offset */
	u64 length;	/* byte count from offset */
	unsigned long flags;
	union {
		u64 snap_id;			/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request *rq;		/* block request */
		struct rbd_obj_request *obj_request;	/* obj req initiator */
	};
	struct page **copyup_pages;
	spinlock_t completion_lock;	/* protects next_completion */
	u32 next_completion;
	rbd_img_callback_t callback;
	u64 xferred;	/* aggregate bytes transferred */
	int result;	/* first nonzero obj_request result */

	u32 obj_request_count;
	struct list_head obj_requests;	/* rbd_obj_request structs */

	struct kref kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
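
/*
 * Illustrative use of the iterators above (a sketch, not driver
 * code):
 *
 *	struct rbd_obj_request *obj_request;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		dout("obj %p covers image bytes %llu..%llu\n",
 *		     obj_request, obj_request->img_offset,
 *		     obj_request->img_offset + obj_request->length - 1);
 */
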
struct rbd_mapping {
	u64 size;
	u64 features;
	bool read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int dev_id;		/* blkdev unique id */

	int major;		/* blkdev assigned major */
	struct gendisk *disk;	/* blkdev's gendisk and rq */

	u32 image_format;	/* Either 1 or 2 */
	struct rbd_client *rbd_client;

	char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t lock;	/* queue, flags, open_count */

	struct rbd_image_header header;
	unsigned long flags;	/* possibly lock protected */
	struct rbd_spec *spec;

	char *header_name;

	struct ceph_file_layout layout;

	struct ceph_osd_event *watch_event;
	struct rbd_obj_request *watch_request;

	struct rbd_spec *parent_spec;
	u64 parent_overlap;
	struct rbd_device *parent;

	/* protects updating the header */
	struct rw_semaphore header_rwsem;

	struct rbd_mapping mapping;

	struct list_head node;

	/* sysfs related */
	struct device dev;
	unsigned long open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev);

static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =	"rbd",
	.release =	rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static int rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};

/*
 * Initialize an rbd client instance.
 * We own *ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);
	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
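
/*
 * For example: given the option string "ro", match_token() above
 * yields Opt_read_only and parse_rbd_opts_token() sets
 * rbd_opts->read_only to true; "rw" or "read_write" resets it to
 * false.  Any unrecognized string falls through to the {-1, NULL}
 * sentinel, so the function returns -EINVAL.
 */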

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
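
/*
 * For instance (illustrative numbers): the common rbd object size of
 * 4 MB corresponds to order 22, which passes both bounds checked
 * above -- no smaller than SECTOR_SHIFT (9) and, with 4-byte ints,
 * no larger than 8 * 4 - 1 = 31.
 */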

/*
 * Create a new header structure, translate header format from the on-disk
 * header.
 */
static int rbd_header_from_disk(struct rbd_image_header *header,
				 struct rbd_image_header_ondisk *ondisk)
{
	u32 snap_count;
	size_t len;
	size_t size;
	u32 i;

	memset(header, 0, sizeof (*header));

	snap_count = le32_to_cpu(ondisk->snap_count);

	len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
	header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
	if (!header->object_prefix)
		return -ENOMEM;
	memcpy(header->object_prefix, ondisk->object_prefix, len);
	header->object_prefix[len] = '\0';

	if (snap_count) {
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* Save a copy of the snapshot names */

		if (snap_names_len > (u64) SIZE_MAX)
			return -EIO;
		header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!header->snap_names)
			goto out_err;
		/*
		 * Note that rbd_dev_v1_header_read() guarantees
		 * the ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(header->snap_names, &ondisk->snaps[snap_count],
			snap_names_len);

		/* Record each snapshot's size */

		size = snap_count * sizeof (*header->snap_sizes);
		header->snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!header->snap_sizes)
			goto out_err;
		for (i = 0; i < snap_count; i++)
			header->snap_sizes[i] =
				le64_to_cpu(ondisk->snaps[i].image_size);
	} else {
		header->snap_names = NULL;
		header->snap_sizes = NULL;
	}

	header->features = 0;	/* No features support in v1 images */
	header->obj_order = ondisk->options.order;
	header->crypt_type = ondisk->options.crypt_type;
	header->comp_type = ondisk->options.comp_type;

	/* Allocate and fill in the snapshot context */

	header->image_size = le64_to_cpu(ondisk->image_size);

	header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!header->snapc)
		goto out_err;
	header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
	for (i = 0; i < snap_count; i++)
		header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id);

	return 0;

out_err:
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	kfree(header->object_prefix);
	header->object_prefix = NULL;

	return -ENOMEM;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
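
/*
 * The names buffer is a packed sequence of NUL-terminated strings.
 * For example (hypothetical snapshots): an image with snapshots
 * "one" and "two" has snap_names holding "one\0two\0", and a call
 * with which == 1 skips strlen("one") + 1 bytes to land on "two".
 */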

static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u32 which;

	for (which = 0; which < snapc->num_snaps; which++)
		if (snapc->snaps[which] == snap_id)
			return which;

	return BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return NULL;

	return _rbd_dev_v1_snap_name(rbd_dev, which);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	const char *snap_name = rbd_dev->spec->snap_name;
	u64 snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) {
		snap_id = rbd_snap_id_by_name(rbd_dev, snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;
	} else {
		snap_id = CEPH_NOSNAP;
	}

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	/* If we are mapping a snapshot it must be marked read-only */

	if (snap_id != CEPH_NOSNAP)
		rbd_dev->mapping.read_only = true;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
	rbd_dev->mapping.read_only = true;
}

static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
	rbd_dev->mapping.read_only = true;
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;

	name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kfree(name);
		name = NULL;
	}

	return name;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
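
/*
 * Worked example of the segment math above (illustrative values):
 * with obj_order 22 (4 MB objects), an image byte offset of
 * 0x00d00000 (13 MB) falls in segment 13 MB >> 22 = 3, at offset
 * 13 MB & (4 MB - 1) = 1 MB within that object.  A 5 MB request
 * starting there is clipped by rbd_segment_length() to the 3 MB
 * remaining in the segment; the rest must go in further requests.
 */
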
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at a specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = (size_t)(offset & ~PAGE_MASK);
		length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	__bio_for_each_segment(bv, bio_src, idx, 0) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;
	bio->bi_idx = 0;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}

/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
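
/*
 * Illustrative consumer of the flags above (a sketch, not the
 * driver's actual copyup logic): KNOWN gates whether EXISTS can be
 * trusted, so it is tested first.
 *
 *	if (!obj_request_known_test(obj_request))
 *		...issue a STAT to learn whether the target exists...
 *	else if (obj_request_exists_test(obj_request))
 *		...write the target object directly...
 *	else
 *		...copy up data from the parent image first...
 */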
1292
Alex Elderbf0d5f502012-11-22 00:00:08 -06001293static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1294{
Alex Elder37206ee2013-02-20 17:32:08 -06001295 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1296 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001297 kref_get(&obj_request->kref);
1298}
1299
1300static void rbd_obj_request_destroy(struct kref *kref);
1301static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1302{
1303 rbd_assert(obj_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001304 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1305 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001306 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1307}
1308
1309static void rbd_img_request_get(struct rbd_img_request *img_request)
1310{
Alex Elder37206ee2013-02-20 17:32:08 -06001311 dout("%s: img %p (was %d)\n", __func__, img_request,
1312 atomic_read(&img_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001313 kref_get(&img_request->kref);
1314}
1315
1316static void rbd_img_request_destroy(struct kref *kref);
1317static void rbd_img_request_put(struct rbd_img_request *img_request)
1318{
1319 rbd_assert(img_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001320 dout("%s: img %p (was %d)\n", __func__, img_request,
1321 atomic_read(&img_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001322 kref_put(&img_request->kref, rbd_img_request_destroy);
1323}
1324
1325static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1326 struct rbd_obj_request *obj_request)
1327{
Alex Elder25dcf952013-01-25 17:08:55 -06001328 rbd_assert(obj_request->img_request == NULL);
1329
Alex Elderb155e862013-04-15 14:50:37 -05001330 /* Image request now owns object's original reference */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001331 obj_request->img_request = img_request;
Alex Elder25dcf952013-01-25 17:08:55 -06001332 obj_request->which = img_request->obj_request_count;
Alex Elder6365d332013-02-11 12:33:24 -06001333 rbd_assert(!obj_request_img_data_test(obj_request));
1334 obj_request_img_data_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001335 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001336 img_request->obj_request_count++;
1337 list_add_tail(&obj_request->links, &img_request->obj_requests);
Alex Elder37206ee2013-02-20 17:32:08 -06001338 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1339 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001340}
1341
1342static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1343 struct rbd_obj_request *obj_request)
1344{
1345 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001346
Alex Elder37206ee2013-02-20 17:32:08 -06001347 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1348 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001349 list_del(&obj_request->links);
Alex Elder25dcf952013-01-25 17:08:55 -06001350 rbd_assert(img_request->obj_request_count > 0);
1351 img_request->obj_request_count--;
1352 rbd_assert(obj_request->which == img_request->obj_request_count);
1353 obj_request->which = BAD_WHICH;
Alex Elder6365d332013-02-11 12:33:24 -06001354 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001355 rbd_assert(obj_request->img_request == img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001356 obj_request->img_request = NULL;
Alex Elder25dcf952013-01-25 17:08:55 -06001357 obj_request->callback = NULL;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001358 rbd_obj_request_put(obj_request);
1359}
1360
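/*
 * Note (illustrative): add and del are symmetric.  rbd_img_request_fill()
 * adds each object request it creates, transferring the object's
 * initial reference to the image request; rbd_img_request_destroy()
 * later deletes each one again, dropping that reference via
 * rbd_obj_request_put() above.
 */
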
1361static bool obj_request_type_valid(enum obj_request_type type)
1362{
1363 switch (type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06001364 case OBJ_REQUEST_NODATA:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001365 case OBJ_REQUEST_BIO:
Alex Elder788e2df2013-01-17 12:25:27 -06001366 case OBJ_REQUEST_PAGES:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001367 return true;
1368 default:
1369 return false;
1370 }
1371}
1372
Alex Elderbf0d5f502012-11-22 00:00:08 -06001373static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1374 struct rbd_obj_request *obj_request)
1375{
Alex Elder37206ee2013-02-20 17:32:08 -06001376 dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);
1377
Alex Elderbf0d5f502012-11-22 00:00:08 -06001378 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1379}
1380
1381static void rbd_img_request_complete(struct rbd_img_request *img_request)
1382{
Alex Elder55f27e02013-04-10 12:34:25 -05001383
Alex Elder37206ee2013-02-20 17:32:08 -06001384 dout("%s: img %p\n", __func__, img_request);
Alex Elder55f27e02013-04-10 12:34:25 -05001385
1386 /*
1387 * If no error occurred, compute the aggregate transfer
1388 * count for the image request. We could instead use
1389 * atomic64_cmpxchg() to update it as each object request
1390 * completes; it is not clear offhand which way is better.
1391 */
1392 if (!img_request->result) {
1393 struct rbd_obj_request *obj_request;
1394 u64 xferred = 0;
1395
1396 for_each_obj_request(img_request, obj_request)
1397 xferred += obj_request->xferred;
1398 img_request->xferred = xferred;
1399 }
1400
Alex Elderbf0d5f502012-11-22 00:00:08 -06001401 if (img_request->callback)
1402 img_request->callback(img_request);
1403 else
1404 rbd_img_request_put(img_request);
1405}
1406
Alex Elder788e2df2013-01-17 12:25:27 -06001407/* Caller is responsible for rbd_obj_request_destroy(obj_request) */
1408
1409static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1410{
Alex Elder37206ee2013-02-20 17:32:08 -06001411 dout("%s: obj %p\n", __func__, obj_request);
1412
Alex Elder788e2df2013-01-17 12:25:27 -06001413 return wait_for_completion_interruptible(&obj_request->completion);
1414}
1415
Alex Elder0c425242013-02-08 09:55:49 -06001416/*
1417 * The default/initial value for all image request flags is 0. Each
1418 * is conditionally set to 1 at image request initialization time
1419 * and currently never changes thereafter.
1420 */
1421static void img_request_write_set(struct rbd_img_request *img_request)
1422{
1423 set_bit(IMG_REQ_WRITE, &img_request->flags);
1424 smp_mb();
1425}
1426
1427static bool img_request_write_test(struct rbd_img_request *img_request)
1428{
1429 smp_mb();
1430 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1431}
1432
Alex Elder9849e982013-01-24 16:13:36 -06001433static void img_request_child_set(struct rbd_img_request *img_request)
1434{
1435 set_bit(IMG_REQ_CHILD, &img_request->flags);
1436 smp_mb();
1437}
1438
1439static bool img_request_child_test(struct rbd_img_request *img_request)
1440{
1441 smp_mb();
1442 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1443}
1444
Alex Elderd0b2e942013-01-24 16:13:36 -06001445static void img_request_layered_set(struct rbd_img_request *img_request)
1446{
1447 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1448 smp_mb();
1449}
1450
1451static bool img_request_layered_test(struct rbd_img_request *img_request)
1452{
1453 smp_mb();
1454 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1455}
1456
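/*
 * Note (illustrative): all three flags above are set, if at all, in
 * rbd_img_request_create() below, e.g.:
 *
 *	if (write_request)
 *		img_request_write_set(img_request);
 *
 * and are only tested thereafter.  The smp_mb() calls pair the
 * one-time set with later tests on other CPUs.
 */
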
Alex Elder6e2a4502013-03-27 09:16:30 -05001457static void
1458rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1459{
Alex Elderb9434c52013-04-19 15:34:50 -05001460 u64 xferred = obj_request->xferred;
1461 u64 length = obj_request->length;
1462
Alex Elder6e2a4502013-03-27 09:16:30 -05001463 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1464 obj_request, obj_request->img_request, obj_request->result,
Alex Elderb9434c52013-04-19 15:34:50 -05001465 xferred, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001466 /*
1467 * ENOENT means a hole in the image. We zero-fill the
1468 * entire length of the request. A short read also implies
1469 * zero-fill to the end of the request. Either way we
1470 * update the xferred count to indicate the whole request
1471 * was satisfied.
1472 */
Alex Elderb9434c52013-04-19 15:34:50 -05001473 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
Alex Elder6e2a4502013-03-27 09:16:30 -05001474 if (obj_request->result == -ENOENT) {
Alex Elderb9434c52013-04-19 15:34:50 -05001475 if (obj_request->type == OBJ_REQUEST_BIO)
1476 zero_bio_chain(obj_request->bio_list, 0);
1477 else
1478 zero_pages(obj_request->pages, 0, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001479 obj_request->result = 0;
Alex Elderb9434c52013-04-19 15:34:50 -05001480 obj_request->xferred = length;
1481 } else if (xferred < length && !obj_request->result) {
1482 if (obj_request->type == OBJ_REQUEST_BIO)
1483 zero_bio_chain(obj_request->bio_list, xferred);
1484 else
1485 zero_pages(obj_request->pages, xferred, length);
1486 obj_request->xferred = length;
Alex Elder6e2a4502013-03-27 09:16:30 -05001487 }
1488 obj_request_done_set(obj_request);
1489}
1490
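/*
 * Worked example for the zero-fill rules above (illustrative): an
 * 8192-byte read that transfers only 4096 bytes has bytes 4096..8191
 * zeroed and xferred raised to 8192; a read that fails with -ENOENT
 * (a hole in the image) is zeroed in full and then reported as a
 * successful 8192-byte transfer.
 */
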
Alex Elderbf0d5f502012-11-22 00:00:08 -06001491static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1492{
Alex Elder37206ee2013-02-20 17:32:08 -06001493 dout("%s: obj %p cb %p\n", __func__, obj_request,
1494 obj_request->callback);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001495 if (obj_request->callback)
1496 obj_request->callback(obj_request);
Alex Elder788e2df2013-01-17 12:25:27 -06001497 else
1498 complete_all(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001499}
1500
Alex Elderc47f9372013-02-26 14:23:07 -06001501static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
Alex Elder39bf2c52013-02-26 14:23:07 -06001502{
1503 dout("%s: obj %p\n", __func__, obj_request);
1504 obj_request_done_set(obj_request);
1505}
1506
Alex Elderc47f9372013-02-26 14:23:07 -06001507static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001508{
Alex Elder57acbaa2013-02-11 12:33:24 -06001509 struct rbd_img_request *img_request = NULL;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001510 struct rbd_device *rbd_dev = NULL;
Alex Elder57acbaa2013-02-11 12:33:24 -06001511 bool layered = false;
1512
1513 if (obj_request_img_data_test(obj_request)) {
1514 img_request = obj_request->img_request;
1515 layered = img_request && img_request_layered_test(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001516 rbd_dev = img_request->rbd_dev;
Alex Elder57acbaa2013-02-11 12:33:24 -06001517 }
Alex Elder8b3e1a52013-01-24 16:13:36 -06001518
1519 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1520 obj_request, img_request, obj_request->result,
1521 obj_request->xferred, obj_request->length);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001522 if (layered && obj_request->result == -ENOENT &&
1523 obj_request->img_offset < rbd_dev->parent_overlap)
Alex Elder8b3e1a52013-01-24 16:13:36 -06001524 rbd_img_parent_read(obj_request);
1525 else if (img_request)
Alex Elder6e2a4502013-03-27 09:16:30 -05001526 rbd_img_obj_request_read_callback(obj_request);
1527 else
1528 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001529}
1530
Alex Elderc47f9372013-02-26 14:23:07 -06001531static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001532{
Sage Weil1b83bef2013-02-25 16:11:12 -08001533 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1534 obj_request->result, obj_request->length);
1535 /*
Alex Elder8b3e1a52013-01-24 16:13:36 -06001536 * There is no such thing as a successful short write. Set
1537 * the transfer count to our originally-requested length.
Sage Weil1b83bef2013-02-25 16:11:12 -08001538 */
1539 obj_request->xferred = obj_request->length;
Alex Elder07741302013-02-05 23:41:50 -06001540 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001541}
1542
Alex Elderfbfab532013-02-08 09:55:48 -06001543/*
1544 * For a simple stat call there's nothing to do. We'll do more if
1545 * this is part of a write sequence for a layered image.
1546 */
Alex Elderc47f9372013-02-26 14:23:07 -06001547static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
Alex Elderfbfab532013-02-08 09:55:48 -06001548{
Alex Elder37206ee2013-02-20 17:32:08 -06001549 dout("%s: obj %p\n", __func__, obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001550 obj_request_done_set(obj_request);
1551}
1552
Alex Elderbf0d5f502012-11-22 00:00:08 -06001553static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1554 struct ceph_msg *msg)
1555{
1556 struct rbd_obj_request *obj_request = osd_req->r_priv;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001557 u16 opcode;
1558
Alex Elder37206ee2013-02-20 17:32:08 -06001559 dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001560 rbd_assert(osd_req == obj_request->osd_req);
Alex Elder57acbaa2013-02-11 12:33:24 -06001561 if (obj_request_img_data_test(obj_request)) {
1562 rbd_assert(obj_request->img_request);
1563 rbd_assert(obj_request->which != BAD_WHICH);
1564 } else {
1565 rbd_assert(obj_request->which == BAD_WHICH);
1566 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06001567
Sage Weil1b83bef2013-02-25 16:11:12 -08001568 if (osd_req->r_result < 0)
1569 obj_request->result = osd_req->r_result;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001570
Alex Elder0eefd472013-04-19 15:34:50 -05001571 BUG_ON(osd_req->r_num_ops > 2);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001572
Alex Elderc47f9372013-02-26 14:23:07 -06001573 /*
1574 * We support a 64-bit length, but ultimately it has to be
1575 * passed to blk_end_request(), which takes an unsigned int.
1576 */
Sage Weil1b83bef2013-02-25 16:11:12 -08001577 obj_request->xferred = osd_req->r_reply_op_len[0];
Alex Elder8b3e1a52013-01-24 16:13:36 -06001578 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
Alex Elder79528732013-04-03 21:32:51 -05001579 opcode = osd_req->r_ops[0].op;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001580 switch (opcode) {
1581 case CEPH_OSD_OP_READ:
Alex Elderc47f9372013-02-26 14:23:07 -06001582 rbd_osd_read_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001583 break;
1584 case CEPH_OSD_OP_WRITE:
Alex Elderc47f9372013-02-26 14:23:07 -06001585 rbd_osd_write_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001586 break;
Alex Elderfbfab532013-02-08 09:55:48 -06001587 case CEPH_OSD_OP_STAT:
Alex Elderc47f9372013-02-26 14:23:07 -06001588 rbd_osd_stat_callback(obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001589 break;
Alex Elder36be9a72013-01-19 00:30:28 -06001590 case CEPH_OSD_OP_CALL:
Alex Elderb8d70032012-11-30 17:53:04 -06001591 case CEPH_OSD_OP_NOTIFY_ACK:
Alex Elder9969ebc2013-01-18 12:31:10 -06001592 case CEPH_OSD_OP_WATCH:
Alex Elderc47f9372013-02-26 14:23:07 -06001593 rbd_osd_trivial_callback(obj_request);
Alex Elder9969ebc2013-01-18 12:31:10 -06001594 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001595 default:
1596 rbd_warn(NULL, "%s: unsupported op %hu\n",
1597 obj_request->object_name, (unsigned short) opcode);
1598 break;
1599 }
1600
Alex Elder07741302013-02-05 23:41:50 -06001601 if (obj_request_done_test(obj_request))
Alex Elderbf0d5f502012-11-22 00:00:08 -06001602 rbd_obj_request_complete(obj_request);
1603}
1604
Alex Elder9d4df012013-04-19 15:34:50 -05001605static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
Alex Elder430c28c2013-04-03 21:32:51 -05001606{
1607 struct rbd_img_request *img_request = obj_request->img_request;
Alex Elder8c042b02013-04-03 01:28:58 -05001608 struct ceph_osd_request *osd_req = obj_request->osd_req;
Alex Elder9d4df012013-04-19 15:34:50 -05001609 u64 snap_id;
Alex Elder430c28c2013-04-03 21:32:51 -05001610
Alex Elder8c042b02013-04-03 01:28:58 -05001611 rbd_assert(osd_req != NULL);
Alex Elder430c28c2013-04-03 21:32:51 -05001612
Alex Elder9d4df012013-04-19 15:34:50 -05001613 snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
Alex Elder8c042b02013-04-03 01:28:58 -05001614 ceph_osdc_build_request(osd_req, obj_request->offset,
Alex Elder9d4df012013-04-19 15:34:50 -05001615 NULL, snap_id, NULL);
1616}
1617
1618static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1619{
1620 struct rbd_img_request *img_request = obj_request->img_request;
1621 struct ceph_osd_request *osd_req = obj_request->osd_req;
1622 struct ceph_snap_context *snapc;
1623 struct timespec mtime = CURRENT_TIME;
1624
1625 rbd_assert(osd_req != NULL);
1626
1627 snapc = img_request ? img_request->snapc : NULL;
1628 ceph_osdc_build_request(osd_req, obj_request->offset,
1629 snapc, CEPH_NOSNAP, &mtime);
Alex Elder430c28c2013-04-03 21:32:51 -05001630}
1631
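/*
 * Note (illustrative): every osd request built in this file is
 * finished with exactly one of the two format helpers above.  Reads
 * (and read-like ops such as stat and notify-ack) carry a snapshot
 * id; writes (and watch registration) carry the snapshot context and
 * an mtime instead.
 */
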
Alex Elderbf0d5f502012-11-22 00:00:08 -06001632static struct ceph_osd_request *rbd_osd_req_create(
1633 struct rbd_device *rbd_dev,
1634 bool write_request,
Alex Elder430c28c2013-04-03 21:32:51 -05001635 struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001636{
Alex Elderbf0d5f502012-11-22 00:00:08 -06001637 struct ceph_snap_context *snapc = NULL;
1638 struct ceph_osd_client *osdc;
1639 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001640
Alex Elder6365d332013-02-11 12:33:24 -06001641 if (obj_request_img_data_test(obj_request)) {
1642 struct rbd_img_request *img_request = obj_request->img_request;
1643
Alex Elder0c425242013-02-08 09:55:49 -06001644 rbd_assert(write_request ==
1645 img_request_write_test(img_request));
1646 if (write_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001647 snapc = img_request->snapc;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001648 }
1649
1650 /* Allocate and initialize the request, for the single op */
1651
1652 osdc = &rbd_dev->rbd_client->client->osdc;
1653 osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
1654 if (!osd_req)
1655 return NULL; /* ENOMEM */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001656
Alex Elder430c28c2013-04-03 21:32:51 -05001657 if (write_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001658 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
Alex Elder430c28c2013-04-03 21:32:51 -05001659 else
Alex Elderbf0d5f502012-11-22 00:00:08 -06001660 osd_req->r_flags = CEPH_OSD_FLAG_READ;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001661
1662 osd_req->r_callback = rbd_osd_req_callback;
1663 osd_req->r_priv = obj_request;
1664
1665 osd_req->r_oid_len = strlen(obj_request->object_name);
1666 rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1667 memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1668
1669 osd_req->r_file_layout = rbd_dev->layout; /* struct */
1670
Alex Elderbf0d5f502012-11-22 00:00:08 -06001671 return osd_req;
1672}
1673
Alex Elder0eefd472013-04-19 15:34:50 -05001674/*
1675 * Create a copyup osd request based on the information in the
1676 * object request supplied. A copyup request has two osd ops:
1677 * a copyup method call and a "normal" write request.
1678 */
1679static struct ceph_osd_request *
1680rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1681{
1682 struct rbd_img_request *img_request;
1683 struct ceph_snap_context *snapc;
1684 struct rbd_device *rbd_dev;
1685 struct ceph_osd_client *osdc;
1686 struct ceph_osd_request *osd_req;
1687
1688 rbd_assert(obj_request_img_data_test(obj_request));
1689 img_request = obj_request->img_request;
1690 rbd_assert(img_request);
1691 rbd_assert(img_request_write_test(img_request));
1692
1693 /* Allocate and initialize the request, for the two ops */
1694
1695 snapc = img_request->snapc;
1696 rbd_dev = img_request->rbd_dev;
1697 osdc = &rbd_dev->rbd_client->client->osdc;
1698 osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
1699 if (!osd_req)
1700 return NULL; /* ENOMEM */
1701
1702 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1703 osd_req->r_callback = rbd_osd_req_callback;
1704 osd_req->r_priv = obj_request;
1705
1706 osd_req->r_oid_len = strlen(obj_request->object_name);
1707 rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1708 memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1709
1710 osd_req->r_file_layout = rbd_dev->layout; /* struct */
1711
1712 return osd_req;
1713}
1714
1715
Alex Elderbf0d5f502012-11-22 00:00:08 -06001716static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1717{
1718 ceph_osdc_put_request(osd_req);
1719}
1720
1721/* object_name is assumed to be a non-null pointer and NUL-terminated */
1722
1723static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1724 u64 offset, u64 length,
1725 enum obj_request_type type)
1726{
1727 struct rbd_obj_request *obj_request;
1728 size_t size;
1729 char *name;
1730
1731 rbd_assert(obj_request_type_valid(type));
1732
1733 size = strlen(object_name) + 1;
1734 obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
1735 if (!obj_request)
1736 return NULL;
1737
1738 name = (char *)(obj_request + 1);
1739 obj_request->object_name = memcpy(name, object_name, size);
1740 obj_request->offset = offset;
1741 obj_request->length = length;
Alex Elder926f9b32013-02-11 12:33:24 -06001742 obj_request->flags = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001743 obj_request->which = BAD_WHICH;
1744 obj_request->type = type;
1745 INIT_LIST_HEAD(&obj_request->links);
Alex Elder788e2df2013-01-17 12:25:27 -06001746 init_completion(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001747 kref_init(&obj_request->kref);
1748
Alex Elder37206ee2013-02-20 17:32:08 -06001749 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
1750 offset, length, (int)type, obj_request);
1751
Alex Elderbf0d5f502012-11-22 00:00:08 -06001752 return obj_request;
1753}
1754
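/*
 * Illustrative sketch only (never called): minimal lifecycle of a
 * standalone object request.  The object name is an assumption for
 * the example; real callers attach an osd request and submit it
 * before dropping their reference.
 */
static int __maybe_unused rbd_obj_request_example(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = rbd_obj_request_create("rb.example", 0, 0,
						OBJ_REQUEST_NODATA);
	if (!obj_request)
		return -ENOMEM;

	/* ... set up obj_request->osd_req, submit, and wait here ... */

	rbd_obj_request_put(obj_request);	/* drops the initial kref */

	return 0;
}
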
1755static void rbd_obj_request_destroy(struct kref *kref)
1756{
1757 struct rbd_obj_request *obj_request;
1758
1759 obj_request = container_of(kref, struct rbd_obj_request, kref);
1760
Alex Elder37206ee2013-02-20 17:32:08 -06001761 dout("%s: obj %p\n", __func__, obj_request);
1762
Alex Elderbf0d5f502012-11-22 00:00:08 -06001763 rbd_assert(obj_request->img_request == NULL);
1764 rbd_assert(obj_request->which == BAD_WHICH);
1765
1766 if (obj_request->osd_req)
1767 rbd_osd_req_destroy(obj_request->osd_req);
1768
1769 rbd_assert(obj_request_type_valid(obj_request->type));
1770 switch (obj_request->type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06001771 case OBJ_REQUEST_NODATA:
1772 break; /* Nothing to do */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001773 case OBJ_REQUEST_BIO:
1774 if (obj_request->bio_list)
1775 bio_chain_put(obj_request->bio_list);
1776 break;
Alex Elder788e2df2013-01-17 12:25:27 -06001777 case OBJ_REQUEST_PAGES:
1778 if (obj_request->pages)
1779 ceph_release_page_vector(obj_request->pages,
1780 obj_request->page_count);
1781 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001782 }
1783
1784 kfree(obj_request);
1785}
1786
1787/*
1788 * Caller is responsible for filling in the list of object requests
1789 * that comprises the image request, and the Linux request pointer
1790 * (if there is one).
1791 */
Alex Eldercc344fa2013-02-19 12:25:56 -06001792static struct rbd_img_request *rbd_img_request_create(
1793 struct rbd_device *rbd_dev,
Alex Elderbf0d5f502012-11-22 00:00:08 -06001794 u64 offset, u64 length,
Alex Elder9849e982013-01-24 16:13:36 -06001795 bool write_request,
1796 bool child_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001797{
1798 struct rbd_img_request *img_request;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001799
1800 img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
1801 if (!img_request)
1802 return NULL;
1803
1804 if (write_request) {
1805 down_read(&rbd_dev->header_rwsem);
Alex Elder812164f82013-04-30 00:44:32 -05001806 ceph_get_snap_context(rbd_dev->header.snapc);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001807 up_read(&rbd_dev->header_rwsem);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001808 }
1809
1810 img_request->rq = NULL;
1811 img_request->rbd_dev = rbd_dev;
1812 img_request->offset = offset;
1813 img_request->length = length;
Alex Elder0c425242013-02-08 09:55:49 -06001814 img_request->flags = 0;
1815 if (write_request) {
1816 img_request_write_set(img_request);
Alex Elder468521c2013-04-26 09:43:47 -05001817 img_request->snapc = rbd_dev->header.snapc;
Alex Elder0c425242013-02-08 09:55:49 -06001818 } else {
Alex Elderbf0d5f502012-11-22 00:00:08 -06001819 img_request->snap_id = rbd_dev->spec->snap_id;
Alex Elder0c425242013-02-08 09:55:49 -06001820 }
Alex Elder9849e982013-01-24 16:13:36 -06001821 if (child_request)
1822 img_request_child_set(img_request);
Alex Elderd0b2e942013-01-24 16:13:36 -06001823 if (rbd_dev->parent_spec)
1824 img_request_layered_set(img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001825 spin_lock_init(&img_request->completion_lock);
1826 img_request->next_completion = 0;
1827 img_request->callback = NULL;
Alex Eldera5a337d2013-01-24 16:13:36 -06001828 img_request->result = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001829 img_request->obj_request_count = 0;
1830 INIT_LIST_HEAD(&img_request->obj_requests);
1831 kref_init(&img_request->kref);
1832
1833 rbd_img_request_get(img_request); /* Avoid a warning */
1834 rbd_img_request_put(img_request); /* TEMPORARY */
1835
Alex Elder37206ee2013-02-20 17:32:08 -06001836 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
1837 write_request ? "write" : "read", offset, length,
1838 img_request);
1839
Alex Elderbf0d5f502012-11-22 00:00:08 -06001840 return img_request;
1841}
1842
1843static void rbd_img_request_destroy(struct kref *kref)
1844{
1845 struct rbd_img_request *img_request;
1846 struct rbd_obj_request *obj_request;
1847 struct rbd_obj_request *next_obj_request;
1848
1849 img_request = container_of(kref, struct rbd_img_request, kref);
1850
Alex Elder37206ee2013-02-20 17:32:08 -06001851 dout("%s: img %p\n", __func__, img_request);
1852
Alex Elderbf0d5f502012-11-22 00:00:08 -06001853 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1854 rbd_img_obj_request_del(img_request, obj_request);
Alex Elder25dcf952013-01-25 17:08:55 -06001855 rbd_assert(img_request->obj_request_count == 0);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001856
Alex Elder0c425242013-02-08 09:55:49 -06001857 if (img_request_write_test(img_request))
Alex Elder812164f82013-04-30 00:44:32 -05001858 ceph_put_snap_context(img_request->snapc);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001859
Alex Elder8b3e1a52013-01-24 16:13:36 -06001860 if (img_request_child_test(img_request))
1861 rbd_obj_request_put(img_request->obj_request);
1862
Alex Elderbf0d5f502012-11-22 00:00:08 -06001863 kfree(img_request);
1864}
1865
Alex Elder12178572013-02-08 09:55:49 -06001866static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
1867{
Alex Elder6365d332013-02-11 12:33:24 -06001868 struct rbd_img_request *img_request;
Alex Elder12178572013-02-08 09:55:49 -06001869 unsigned int xferred;
1870 int result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06001871 bool more;
Alex Elder12178572013-02-08 09:55:49 -06001872
Alex Elder6365d332013-02-11 12:33:24 -06001873 rbd_assert(obj_request_img_data_test(obj_request));
1874 img_request = obj_request->img_request;
1875
Alex Elder12178572013-02-08 09:55:49 -06001876 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
1877 xferred = (unsigned int)obj_request->xferred;
1878 result = obj_request->result;
1879 if (result) {
1880 struct rbd_device *rbd_dev = img_request->rbd_dev;
1881
1882 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
1883 img_request_write_test(img_request) ? "write" : "read",
1884 obj_request->length, obj_request->img_offset,
1885 obj_request->offset);
1886 rbd_warn(rbd_dev, " result %d xferred %x\n",
1887 result, xferred);
1888 if (!img_request->result)
1889 img_request->result = result;
1890 }
1891
Alex Elderf1a47392013-04-19 15:34:50 -05001892 /* Image object requests don't own their page array */
1893
1894 if (obj_request->type == OBJ_REQUEST_PAGES) {
1895 obj_request->pages = NULL;
1896 obj_request->page_count = 0;
1897 }
1898
Alex Elder8b3e1a52013-01-24 16:13:36 -06001899 if (img_request_child_test(img_request)) {
1900 rbd_assert(img_request->obj_request != NULL);
1901 more = obj_request->which < img_request->obj_request_count - 1;
1902 } else {
1903 rbd_assert(img_request->rq != NULL);
1904 more = blk_end_request(img_request->rq, result, xferred);
1905 }
1906
1907 return more;
Alex Elder12178572013-02-08 09:55:49 -06001908}
1909
Alex Elder21692382013-04-05 01:27:12 -05001910static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
1911{
1912 struct rbd_img_request *img_request;
1913 u32 which = obj_request->which;
1914 bool more = true;
1915
Alex Elder6365d332013-02-11 12:33:24 -06001916 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elder21692382013-04-05 01:27:12 -05001917 img_request = obj_request->img_request;
1918
1919 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1920 rbd_assert(img_request != NULL);
Alex Elder21692382013-04-05 01:27:12 -05001921 rbd_assert(img_request->obj_request_count > 0);
1922 rbd_assert(which != BAD_WHICH);
1923 rbd_assert(which < img_request->obj_request_count);
1924 rbd_assert(which >= img_request->next_completion);
1925
1926 spin_lock_irq(&img_request->completion_lock);
1927 if (which != img_request->next_completion)
1928 goto out;
1929
1930 for_each_obj_request_from(img_request, obj_request) {
Alex Elder21692382013-04-05 01:27:12 -05001931 rbd_assert(more);
1932 rbd_assert(which < img_request->obj_request_count);
1933
1934 if (!obj_request_done_test(obj_request))
1935 break;
Alex Elder12178572013-02-08 09:55:49 -06001936 more = rbd_img_obj_end_request(obj_request);
Alex Elder21692382013-04-05 01:27:12 -05001937 which++;
1938 }
1939
1940 rbd_assert(more ^ (which == img_request->obj_request_count));
1941 img_request->next_completion = which;
1942out:
1943 spin_unlock_irq(&img_request->completion_lock);
1944
1945 if (!more)
1946 rbd_img_request_complete(img_request);
1947}
1948
Alex Elderf1a47392013-04-19 15:34:50 -05001949/*
1950 * Split up an image request into one or more object requests, each
1951 * to a different object. The "type" parameter indicates whether
1952 * "data_desc" is the pointer to the head of a list of bio
1953 * structures, or the base of a page array. In either case this
1954 * function assumes data_desc describes memory sufficient to hold
1955 * all data described by the image request.
1956 */
1957static int rbd_img_request_fill(struct rbd_img_request *img_request,
1958 enum obj_request_type type,
1959 void *data_desc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001960{
1961 struct rbd_device *rbd_dev = img_request->rbd_dev;
1962 struct rbd_obj_request *obj_request = NULL;
1963 struct rbd_obj_request *next_obj_request;
Alex Elder0c425242013-02-08 09:55:49 -06001964 bool write_request = img_request_write_test(img_request);
Alex Elderf1a47392013-04-19 15:34:50 -05001965 struct bio *bio_list;
1966 unsigned int bio_offset = 0;
1967 struct page **pages;
Alex Elder7da22d22013-01-24 16:13:36 -06001968 u64 img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001969 u64 resid;
1970 u16 opcode;
1971
Alex Elderf1a47392013-04-19 15:34:50 -05001972 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
1973 (int)type, data_desc);
Alex Elder37206ee2013-02-20 17:32:08 -06001974
Alex Elder430c28c2013-04-03 21:32:51 -05001975 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
Alex Elder7da22d22013-01-24 16:13:36 -06001976 img_offset = img_request->offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001977 resid = img_request->length;
Alex Elder4dda41d2013-02-20 21:59:33 -06001978 rbd_assert(resid > 0);
Alex Elderf1a47392013-04-19 15:34:50 -05001979
1980 if (type == OBJ_REQUEST_BIO) {
1981 bio_list = data_desc;
1982 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
1983 } else {
1984 rbd_assert(type == OBJ_REQUEST_PAGES);
1985 pages = data_desc;
1986 }
1987
Alex Elderbf0d5f502012-11-22 00:00:08 -06001988 while (resid) {
Alex Elder2fa12322013-04-05 01:27:12 -05001989 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001990 const char *object_name;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001991 u64 offset;
1992 u64 length;
1993
Alex Elder7da22d22013-01-24 16:13:36 -06001994 object_name = rbd_segment_name(rbd_dev, img_offset);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001995 if (!object_name)
1996 goto out_unwind;
Alex Elder7da22d22013-01-24 16:13:36 -06001997 offset = rbd_segment_offset(rbd_dev, img_offset);
1998 length = rbd_segment_length(rbd_dev, img_offset, resid);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001999 obj_request = rbd_obj_request_create(object_name,
Alex Elderf1a47392013-04-19 15:34:50 -05002000 offset, length, type);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002001 kfree(object_name); /* object request has its own copy */
2002 if (!obj_request)
2003 goto out_unwind;
2004
Alex Elderf1a47392013-04-19 15:34:50 -05002005 if (type == OBJ_REQUEST_BIO) {
2006 unsigned int clone_size;
2007
2008 rbd_assert(length <= (u64)UINT_MAX);
2009 clone_size = (unsigned int)length;
2010 obj_request->bio_list =
2011 bio_chain_clone_range(&bio_list,
2012 &bio_offset,
2013 clone_size,
2014 GFP_ATOMIC);
2015 if (!obj_request->bio_list)
2016 goto out_partial;
2017 } else {
2018 unsigned int page_count;
2019
2020 obj_request->pages = pages;
2021 page_count = (u32)calc_pages_for(offset, length);
2022 obj_request->page_count = page_count;
2023 if ((offset + length) & ~PAGE_MASK)
2024 page_count--; /* more on last page */
2025 pages += page_count;
2026 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06002027
Alex Elder2fa12322013-04-05 01:27:12 -05002028 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2029 obj_request);
2030 if (!osd_req)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002031 goto out_partial;
Alex Elder2fa12322013-04-05 01:27:12 -05002032 obj_request->osd_req = osd_req;
Alex Elder21692382013-04-05 01:27:12 -05002033 obj_request->callback = rbd_img_obj_callback;
Alex Elder430c28c2013-04-03 21:32:51 -05002034
Alex Elder2fa12322013-04-05 01:27:12 -05002035 osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
2036 0, 0);
Alex Elderf1a47392013-04-19 15:34:50 -05002037 if (type == OBJ_REQUEST_BIO)
2038 osd_req_op_extent_osd_data_bio(osd_req, 0,
2039 obj_request->bio_list, length);
2040 else
2041 osd_req_op_extent_osd_data_pages(osd_req, 0,
2042 obj_request->pages, length,
2043 offset & ~PAGE_MASK, false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05002044
2045 if (write_request)
2046 rbd_osd_req_format_write(obj_request);
2047 else
2048 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05002049
Alex Elder7da22d22013-01-24 16:13:36 -06002050 obj_request->img_offset = img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002051 rbd_img_obj_request_add(img_request, obj_request);
2052
Alex Elder7da22d22013-01-24 16:13:36 -06002053 img_offset += length;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002054 resid -= length;
2055 }
2056
2057 return 0;
2058
2059out_partial:
2060 rbd_obj_request_put(obj_request);
2061out_unwind:
2062 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2063 rbd_obj_request_put(obj_request);
2064
2065 return -ENOMEM;
2066}
2067
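/*
 * Worked example for the segmenting loop above (illustrative): with
 * the default object order of 22 (4 MiB objects), a 6 MiB request at
 * image offset 3 MiB becomes three object requests: 1 MiB at offset
 * 3 MiB in the first object, all 4 MiB of the second object, and
 * 1 MiB at offset 0 of the third.
 */
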
Alex Elder3d7efd12013-04-19 15:34:50 -05002068static void
Alex Elder0eefd472013-04-19 15:34:50 -05002069rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2070{
2071 struct rbd_img_request *img_request;
2072 struct rbd_device *rbd_dev;
2073 u64 length;
2074 u32 page_count;
2075
2076 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2077 rbd_assert(obj_request_img_data_test(obj_request));
2078 img_request = obj_request->img_request;
2079 rbd_assert(img_request);
2080
2081 rbd_dev = img_request->rbd_dev;
2082 rbd_assert(rbd_dev);
2083 length = (u64)1 << rbd_dev->header.obj_order;
2084 page_count = (u32)calc_pages_for(0, length);
2085
2086 rbd_assert(obj_request->copyup_pages);
2087 ceph_release_page_vector(obj_request->copyup_pages, page_count);
2088 obj_request->copyup_pages = NULL;
2089
2090 /*
2091 * We want the transfer count to reflect the size of the
2092 * original write request. There is no such thing as a
2093 * successful short write, so if the request was successful
2094 * we can just set it to the originally-requested length.
2095 */
2096 if (!obj_request->result)
2097 obj_request->xferred = obj_request->length;
2098
2099 /* Finish up with the normal image object callback */
2100
2101 rbd_img_obj_callback(obj_request);
2102}
2103
2104static void
Alex Elder3d7efd12013-04-19 15:34:50 -05002105rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2106{
2107 struct rbd_obj_request *orig_request;
Alex Elder0eefd472013-04-19 15:34:50 -05002108 struct ceph_osd_request *osd_req;
2109 struct ceph_osd_client *osdc;
2110 struct rbd_device *rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002111 struct page **pages;
Alex Elder3d7efd12013-04-19 15:34:50 -05002112 int result;
2113 u64 obj_size;
2114 u64 xferred;
2115
2116 rbd_assert(img_request_child_test(img_request));
2117
2118 /* First get what we need from the image request */
2119
2120 pages = img_request->copyup_pages;
2121 rbd_assert(pages != NULL);
2122 img_request->copyup_pages = NULL;
2123
2124 orig_request = img_request->obj_request;
2125 rbd_assert(orig_request != NULL);
Alex Elder0eefd472013-04-19 15:34:50 -05002126 rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
Alex Elder3d7efd12013-04-19 15:34:50 -05002127 result = img_request->result;
2128 obj_size = img_request->length;
2129 xferred = img_request->xferred;
2130
Alex Elder0eefd472013-04-19 15:34:50 -05002131 rbd_dev = img_request->rbd_dev;
2132 rbd_assert(rbd_dev);
2133 rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);
2134
Alex Elder3d7efd12013-04-19 15:34:50 -05002135 rbd_img_request_put(img_request);
2136
Alex Elder0eefd472013-04-19 15:34:50 -05002137 if (result)
2138 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002139
Alex Elder0eefd472013-04-19 15:34:50 -05002140 /* Allocate the new copyup osd request for the original request */
Alex Elder3d7efd12013-04-19 15:34:50 -05002141
Alex Elder0eefd472013-04-19 15:34:50 -05002142 result = -ENOMEM;
2143 rbd_assert(!orig_request->osd_req);
2144 osd_req = rbd_osd_req_create_copyup(orig_request);
2145 if (!osd_req)
2146 goto out_err;
2147 orig_request->osd_req = osd_req;
2148 orig_request->copyup_pages = pages;
Alex Elder3d7efd12013-04-19 15:34:50 -05002149
Alex Elder0eefd472013-04-19 15:34:50 -05002150 /* Initialize the copyup op */
2151
2152 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2153 osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
2154 false, false);
2155
2156 /* Then the original write request op */
2157
2158 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2159 orig_request->offset,
2160 orig_request->length, 0, 0);
2161 osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
2162 orig_request->length);
2163
2164 rbd_osd_req_format_write(orig_request);
2165
2166 /* All set, send it off. */
2167
2168 orig_request->callback = rbd_img_obj_copyup_callback;
2169 osdc = &rbd_dev->rbd_client->client->osdc;
2170 result = rbd_obj_request_submit(osdc, orig_request);
2171 if (!result)
2172 return;
2173out_err:
2174 /* Record the error code and complete the request */
2175
2176 orig_request->result = result;
2177 orig_request->xferred = 0;
2178 obj_request_done_set(orig_request);
2179 rbd_obj_request_complete(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002180}
2181
2182/*
2183 * Read from the parent image the range of data that covers the
2184 * entire target of the given object request. This is used for
2185 * satisfying a layered image write request when the target of an
2186 * object request from the image request does not exist.
2187 *
2188 * A page array big enough to hold the returned data is allocated
2189 * and supplied to rbd_img_request_fill() as the "data descriptor."
2190 * When the read completes, this page array will be transferred to
2191 * the original object request for the copyup operation.
2192 *
2193 * If an error occurs, record it as the result of the original
2194 * object request and mark it done so it gets completed.
2195 */
2196static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2197{
2198 struct rbd_img_request *img_request = NULL;
2199 struct rbd_img_request *parent_request = NULL;
2200 struct rbd_device *rbd_dev;
2201 u64 img_offset;
2202 u64 length;
2203 struct page **pages = NULL;
2204 u32 page_count;
2205 int result;
2206
2207 rbd_assert(obj_request_img_data_test(obj_request));
2208 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2209
2210 img_request = obj_request->img_request;
2211 rbd_assert(img_request != NULL);
2212 rbd_dev = img_request->rbd_dev;
2213 rbd_assert(rbd_dev->parent != NULL);
2214
2215 /*
Alex Elder0eefd472013-04-19 15:34:50 -05002216 * First things first. The original osd request is of no
2217 * use to us any more; we'll need a new one that can hold
2218 * the two ops in a copyup request. We'll get that later,
2219 * but for now we can release the old one.
2220 */
2221 rbd_osd_req_destroy(obj_request->osd_req);
2222 obj_request->osd_req = NULL;
2223
2224 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002225 * Determine the byte range covered by the object in the
2226 * child image to which the original request was to be sent.
2227 */
2228 img_offset = obj_request->img_offset - obj_request->offset;
2229 length = (u64)1 << rbd_dev->header.obj_order;
2230
2231 /*
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002232 * There is no defined parent data beyond the parent
2233 * overlap, so limit what we read at that boundary if
2234 * necessary.
2235 */
2236 if (img_offset + length > rbd_dev->parent_overlap) {
2237 rbd_assert(img_offset < rbd_dev->parent_overlap);
2238 length = rbd_dev->parent_overlap - img_offset;
2239 }
2240
2241 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002242 * Allocate a page array big enough to receive the data read
2243 * from the parent.
2244 */
2245 page_count = (u32)calc_pages_for(0, length);
2246 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2247 if (IS_ERR(pages)) {
2248 result = PTR_ERR(pages);
2249 pages = NULL;
2250 goto out_err;
2251 }
2252
2253 result = -ENOMEM;
2254 parent_request = rbd_img_request_create(rbd_dev->parent,
2255 img_offset, length,
2256 false, true);
2257 if (!parent_request)
2258 goto out_err;
2259 rbd_obj_request_get(obj_request);
2260 parent_request->obj_request = obj_request;
2261
2262 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2263 if (result)
2264 goto out_err;
2265 parent_request->copyup_pages = pages;
2266
2267 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2268 result = rbd_img_request_submit(parent_request);
2269 if (!result)
2270 return 0;
2271
2272 parent_request->copyup_pages = NULL;
2273 parent_request->obj_request = NULL;
2274 rbd_obj_request_put(obj_request);
2275out_err:
2276 if (pages)
2277 ceph_release_page_vector(pages, page_count);
2278 if (parent_request)
2279 rbd_img_request_put(parent_request);
2280 obj_request->result = result;
2281 obj_request->xferred = 0;
2282 obj_request_done_set(obj_request);
2283
2284 return result;
2285}
2286
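/*
 * Worked example for the overlap clamp above (illustrative): with
 * 4 MiB objects and a parent overlap of 6 MiB, a copyup targeting
 * the object at image offsets 4..8 MiB reads only 2 MiB from the
 * parent; beyond the overlap there is no parent data to copy.
 */
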
Alex Elderc5b5ef62013-02-11 12:33:24 -06002287static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2288{
Alex Elderc5b5ef62013-02-11 12:33:24 -06002289 struct rbd_obj_request *orig_request;
2290 int result;
2291
2292 rbd_assert(!obj_request_img_data_test(obj_request));
2293
2294 /*
2295 * All we need from the object request is the original
2296 * request and the result of the STAT op. Grab those, then
2297 * we're done with the request.
2298 */
2299 orig_request = obj_request->obj_request;
2300 obj_request->obj_request = NULL;
2301 rbd_assert(orig_request);
2302 rbd_assert(orig_request->img_request);
2303
2304 result = obj_request->result;
2305 obj_request->result = 0;
2306
2307 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2308 obj_request, orig_request, result,
2309 obj_request->xferred, obj_request->length);
2310 rbd_obj_request_put(obj_request);
2311
2312 rbd_assert(orig_request);
2313 rbd_assert(orig_request->img_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002314
2315 /*
2316 * Our only purpose here is to determine whether the object
2317 * exists, and we don't want to treat the non-existence as
2318 * an error. If something else comes back, transfer the
2319 * error to the original request and complete it now.
2320 */
2321 if (!result) {
2322 obj_request_existence_set(orig_request, true);
2323 } else if (result == -ENOENT) {
2324 obj_request_existence_set(orig_request, false);
2325 } else if (result) {
2326 orig_request->result = result;
Alex Elder3d7efd12013-04-19 15:34:50 -05002327 goto out;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002328 }
2329
2330 /*
2331 * Resubmit the original request now that we have recorded
2332 * whether the target object exists.
2333 */
Alex Elderb454e362013-04-19 15:34:50 -05002334 orig_request->result = rbd_img_obj_request_submit(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002335out:
Alex Elderc5b5ef62013-02-11 12:33:24 -06002336 if (orig_request->result)
2337 rbd_obj_request_complete(orig_request);
2338 rbd_obj_request_put(orig_request);
2339}
2340
2341static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2342{
2343 struct rbd_obj_request *stat_request;
2344 struct rbd_device *rbd_dev;
2345 struct ceph_osd_client *osdc;
2346 struct page **pages = NULL;
2347 u32 page_count;
2348 size_t size;
2349 int ret;
2350
2351 /*
2352 * The response data for a STAT call consists of:
2353 * le64 length;
2354 * struct {
2355 * le32 tv_sec;
2356 * le32 tv_nsec;
2357 * } mtime;
2358 */
2359 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2360 page_count = (u32)calc_pages_for(0, size);
2361 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2362 if (IS_ERR(pages))
2363 return PTR_ERR(pages);
2364
2365 ret = -ENOMEM;
2366 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2367 OBJ_REQUEST_PAGES);
2368 if (!stat_request)
2369 goto out;
2370
2371 rbd_obj_request_get(obj_request);
2372 stat_request->obj_request = obj_request;
2373 stat_request->pages = pages;
2374 stat_request->page_count = page_count;
2375
2376 rbd_assert(obj_request->img_request);
2377 rbd_dev = obj_request->img_request->rbd_dev;
2378 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2379 stat_request);
2380 if (!stat_request->osd_req)
2381 goto out;
2382 stat_request->callback = rbd_img_obj_exists_callback;
2383
2384 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2385 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2386 false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05002387 rbd_osd_req_format_read(stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002388
2389 osdc = &rbd_dev->rbd_client->client->osdc;
2390 ret = rbd_obj_request_submit(osdc, stat_request);
2391out:
2392 if (ret)
2393 rbd_obj_request_put(obj_request);
2394
2395 return ret;
2396}
2397
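/*
 * Illustrative only: the STAT reply layout documented above, written
 * out as a packed structure.  The driver never decodes the reply
 * (only the op's result matters), so this is purely for reference.
 */
#if 0
struct rbd_obj_stat_reply {
	__le64	length;
	struct {
		__le32	tv_sec;
		__le32	tv_nsec;
	} mtime;
} __attribute__ ((packed));
#endif
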
Alex Elderb454e362013-04-19 15:34:50 -05002398static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2399{
2400 struct rbd_img_request *img_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002401 struct rbd_device *rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002402 bool known;
Alex Elderb454e362013-04-19 15:34:50 -05002403
2404 rbd_assert(obj_request_img_data_test(obj_request));
2405
2406 img_request = obj_request->img_request;
2407 rbd_assert(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002408 rbd_dev = img_request->rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002409
Alex Elderb454e362013-04-19 15:34:50 -05002410 /*
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002411 * Only writes to layered images need special handling.
2412 * Reads and non-layered writes are simple object requests.
2413 * Layered writes that start beyond the end of the overlap
2414 * with the parent have no parent data, so they too are
2415 * simple object requests. Finally, if the target object is
2416 * known to already exist, its parent data has already been
2417 * copied, so a write to the object can also be handled as a
2418 * simple object request.
Alex Elderb454e362013-04-19 15:34:50 -05002419 */
2420 if (!img_request_write_test(img_request) ||
2421 !img_request_layered_test(img_request) ||
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002422 rbd_dev->parent_overlap <= obj_request->img_offset ||
Alex Elder3d7efd12013-04-19 15:34:50 -05002423 ((known = obj_request_known_test(obj_request)) &&
2424 obj_request_exists_test(obj_request))) {
Alex Elderb454e362013-04-19 15:34:50 -05002425
2426 struct rbd_device *rbd_dev;
2427 struct ceph_osd_client *osdc;
2428
2429 rbd_dev = obj_request->img_request->rbd_dev;
2430 osdc = &rbd_dev->rbd_client->client->osdc;
2431
2432 return rbd_obj_request_submit(osdc, obj_request);
2433 }
2434
2435 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002436 * It's a layered write. The target object might exist but
2437 * we may not know that yet. If we know it doesn't exist,
2438 * start by reading the data for the full target object from
2439 * the parent so we can use it for a copyup to the target.
Alex Elderb454e362013-04-19 15:34:50 -05002440 */
Alex Elder3d7efd12013-04-19 15:34:50 -05002441 if (known)
2442 return rbd_img_obj_parent_read_full(obj_request);
2443
2444 /* We don't know whether the target exists. Go find out. */
Alex Elderb454e362013-04-19 15:34:50 -05002445
2446 return rbd_img_obj_exists_submit(obj_request);
2447}
2448
Alex Elderbf0d5f502012-11-22 00:00:08 -06002449static int rbd_img_request_submit(struct rbd_img_request *img_request)
2450{
Alex Elderbf0d5f502012-11-22 00:00:08 -06002451 struct rbd_obj_request *obj_request;
Alex Elder46faeed2013-04-10 17:47:46 -05002452 struct rbd_obj_request *next_obj_request;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002453
Alex Elder37206ee2013-02-20 17:32:08 -06002454 dout("%s: img %p\n", __func__, img_request);
Alex Elder46faeed2013-04-10 17:47:46 -05002455 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
Alex Elderbf0d5f502012-11-22 00:00:08 -06002456 int ret;
2457
Alex Elderb454e362013-04-19 15:34:50 -05002458 ret = rbd_img_obj_request_submit(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002459 if (ret)
2460 return ret;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002461 }
2462
2463 return 0;
2464}
2465
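/*
 * Illustrative sketch only (never called): how the create/fill/submit
 * helpers above combine for a bio-backed image read.  This loosely
 * mirrors what rbd_request_fn() does below, with error handling
 * abbreviated.
 */
static int __maybe_unused
rbd_img_read_example(struct rbd_device *rbd_dev, u64 offset, u64 length,
			struct bio *bio_list)
{
	struct rbd_img_request *img_request;
	int ret;

	/* "offset" must match the bio chain's starting sector */

	img_request = rbd_img_request_create(rbd_dev, offset, length,
						false, false);
	if (!img_request)
		return -ENOMEM;

	ret = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, bio_list);
	if (!ret)
		ret = rbd_img_request_submit(img_request);
	if (ret)
		rbd_img_request_put(img_request);

	return ret;
}
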
Alex Elder8b3e1a52013-01-24 16:13:36 -06002466static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2467{
2468 struct rbd_obj_request *obj_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002469 struct rbd_device *rbd_dev;
2470 u64 obj_end;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002471
2472 rbd_assert(img_request_child_test(img_request));
2473
2474 obj_request = img_request->obj_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002475 rbd_assert(obj_request);
2476 rbd_assert(obj_request->img_request);
Alex Elder8b3e1a52013-01-24 16:13:36 -06002477
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002478 obj_request->result = img_request->result;
2479 if (obj_request->result)
2480 goto out;
2481
2482 /*
2483 * We need to zero anything beyond the parent overlap
2484 * boundary. Since rbd_img_obj_request_read_callback()
2485 * will zero anything beyond the end of a short read, an
2486 * easy way to do this is to pretend the data from the
2487 * parent came up short--ending at the overlap boundary.
2488 */
2489 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2490 obj_end = obj_request->img_offset + obj_request->length;
2491 rbd_dev = obj_request->img_request->rbd_dev;
2492 if (obj_end > rbd_dev->parent_overlap) {
2493 u64 xferred = 0;
2494
2495 if (obj_request->img_offset < rbd_dev->parent_overlap)
2496 xferred = rbd_dev->parent_overlap -
2497 obj_request->img_offset;
2498
2499 obj_request->xferred = min(img_request->xferred, xferred);
2500 } else {
2501 obj_request->xferred = img_request->xferred;
2502 }
2503out:
Alex Elder8b3e1a52013-01-24 16:13:36 -06002504 rbd_img_obj_request_read_callback(obj_request);
2505 rbd_obj_request_complete(obj_request);
2506}
2507
2508static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2509{
2510 struct rbd_device *rbd_dev;
2511 struct rbd_img_request *img_request;
2512 int result;
2513
2514 rbd_assert(obj_request_img_data_test(obj_request));
2515 rbd_assert(obj_request->img_request != NULL);
2516 rbd_assert(obj_request->result == (s32) -ENOENT);
2517 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2518
2519 rbd_dev = obj_request->img_request->rbd_dev;
2520 rbd_assert(rbd_dev->parent != NULL);
2521 /* rbd_read_finish(obj_request, obj_request->length); */
2522 img_request = rbd_img_request_create(rbd_dev->parent,
2523 obj_request->img_offset,
2524 obj_request->length,
2525 false, true);
2526 result = -ENOMEM;
2527 if (!img_request)
2528 goto out_err;
2529
2530 rbd_obj_request_get(obj_request);
2531 img_request->obj_request = obj_request;
2532
Alex Elderf1a47392013-04-19 15:34:50 -05002533 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2534 obj_request->bio_list);
Alex Elder8b3e1a52013-01-24 16:13:36 -06002535 if (result)
2536 goto out_err;
2537
2538 img_request->callback = rbd_img_parent_read_callback;
2539 result = rbd_img_request_submit(img_request);
2540 if (result)
2541 goto out_err;
2542
2543 return;
2544out_err:
2545 if (img_request)
2546 rbd_img_request_put(img_request);
2547 obj_request->result = result;
2548 obj_request->xferred = 0;
2549 obj_request_done_set(obj_request);
2550}
2551
Alex Eldercc4a38bd2013-04-30 00:44:33 -05002552static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
Alex Elderb8d70032012-11-30 17:53:04 -06002553{
2554 struct rbd_obj_request *obj_request;
Alex Elder21692382013-04-05 01:27:12 -05002555 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elderb8d70032012-11-30 17:53:04 -06002556 int ret;
2557
2558 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2559 OBJ_REQUEST_NODATA);
2560 if (!obj_request)
2561 return -ENOMEM;
2562
2563 ret = -ENOMEM;
Alex Elder430c28c2013-04-03 21:32:51 -05002564 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
Alex Elderb8d70032012-11-30 17:53:04 -06002565 if (!obj_request->osd_req)
2566 goto out;
Alex Elder21692382013-04-05 01:27:12 -05002567 obj_request->callback = rbd_obj_request_put;
Alex Elderb8d70032012-11-30 17:53:04 -06002568
Alex Elderc99d2d42013-04-05 01:27:11 -05002569 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
Alex Eldercc4a38bd2013-04-30 00:44:33 -05002570 notify_id, 0, 0);
Alex Elder9d4df012013-04-19 15:34:50 -05002571 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05002572
Alex Elderb8d70032012-11-30 17:53:04 -06002573 ret = rbd_obj_request_submit(osdc, obj_request);
Alex Elderb8d70032012-11-30 17:53:04 -06002574out:
Alex Eldercf81b602013-01-17 12:18:46 -06002575 if (ret)
2576 rbd_obj_request_put(obj_request);
Alex Elderb8d70032012-11-30 17:53:04 -06002577
2578 return ret;
2579}
2580
2581static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2582{
2583 struct rbd_device *rbd_dev = (struct rbd_device *)data;
Alex Elderb8d70032012-11-30 17:53:04 -06002584
2585 if (!rbd_dev)
2586 return;
2587
Alex Elder37206ee2013-02-20 17:32:08 -06002588 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
Alex Eldercc4a38bd2013-04-30 00:44:33 -05002589 rbd_dev->header_name, (unsigned long long)notify_id,
2590 (unsigned int)opcode);
2591 (void)rbd_dev_refresh(rbd_dev);
Alex Elderb8d70032012-11-30 17:53:04 -06002592
Alex Eldercc4a38bd2013-04-30 00:44:33 -05002593 rbd_obj_notify_ack(rbd_dev, notify_id);
Alex Elderb8d70032012-11-30 17:53:04 -06002594}
2595
Alex Elder9969ebc2013-01-18 12:31:10 -06002596/*
2597 * Request sync osd watch/unwatch. The value of "start" determines
2598 * whether a watch request is being initiated or torn down.
2599 */
2600static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
2601{
2602 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2603 struct rbd_obj_request *obj_request;
Alex Elder9969ebc2013-01-18 12:31:10 -06002604 int ret;
2605
2606 rbd_assert(start ^ !!rbd_dev->watch_event);
2607 rbd_assert(start ^ !!rbd_dev->watch_request);
2608
2609 if (start) {
Alex Elder3c663bb2013-02-15 11:42:30 -06002610 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
Alex Elder9969ebc2013-01-18 12:31:10 -06002611 &rbd_dev->watch_event);
2612 if (ret < 0)
2613 return ret;
Alex Elder8eb87562013-01-25 17:08:55 -06002614 rbd_assert(rbd_dev->watch_event != NULL);
Alex Elder9969ebc2013-01-18 12:31:10 -06002615 }
2616
2617 ret = -ENOMEM;
2618 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2619 OBJ_REQUEST_NODATA);
2620 if (!obj_request)
2621 goto out_cancel;
2622
Alex Elder430c28c2013-04-03 21:32:51 -05002623 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2624 if (!obj_request->osd_req)
2625 goto out_cancel;
2626
Alex Elder8eb87562013-01-25 17:08:55 -06002627 if (start)
Alex Elder975241a2013-01-25 17:08:55 -06002628 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
Alex Elder8eb87562013-01-25 17:08:55 -06002629 else
Alex Elder6977c3f2013-01-25 17:08:55 -06002630 ceph_osdc_unregister_linger_request(osdc,
Alex Elder975241a2013-01-25 17:08:55 -06002631 rbd_dev->watch_request->osd_req);
Alex Elder21692382013-04-05 01:27:12 -05002632
2633 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
Alex Elderb21ebdd2013-04-30 00:44:32 -05002634 rbd_dev->watch_event->cookie, 0, start);
Alex Elder9d4df012013-04-19 15:34:50 -05002635 rbd_osd_req_format_write(obj_request);
Alex Elder21692382013-04-05 01:27:12 -05002636
Alex Elder9969ebc2013-01-18 12:31:10 -06002637 ret = rbd_obj_request_submit(osdc, obj_request);
2638 if (ret)
2639 goto out_cancel;
2640 ret = rbd_obj_request_wait(obj_request);
2641 if (ret)
2642 goto out_cancel;
Alex Elder9969ebc2013-01-18 12:31:10 -06002643 ret = obj_request->result;
2644 if (ret)
2645 goto out_cancel;
2646
Alex Elder8eb87562013-01-25 17:08:55 -06002647 /*
2648 * A watch request is set to linger, so the underlying osd
2649 * request won't go away until we unregister it. We retain
2650 * a pointer to the object request during that time (in
2651 * rbd_dev->watch_request), so we'll keep a reference to
2652 * it. We'll drop that reference (below) after we've
2653 * unregistered it.
2654 */
2655 if (start) {
2656 rbd_dev->watch_request = obj_request;
2657
2658 return 0;
2659 }
2660
2661 /* We have successfully torn down the watch request */
2662
2663 rbd_obj_request_put(rbd_dev->watch_request);
2664 rbd_dev->watch_request = NULL;
Alex Elder9969ebc2013-01-18 12:31:10 -06002665out_cancel:
2666 /* Cancel the event if we're tearing down, or on error */
2667 ceph_osdc_cancel_event(rbd_dev->watch_event);
2668 rbd_dev->watch_event = NULL;
Alex Elder9969ebc2013-01-18 12:31:10 -06002669 if (obj_request)
2670 rbd_obj_request_put(obj_request);
2671
2672 return ret;
2673}
2674
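/*
 * Note (illustrative): callers pair the two modes of the function
 * above, e.g. rbd_dev_header_watch_sync(rbd_dev, 1) when an image is
 * mapped and rbd_dev_header_watch_sync(rbd_dev, 0) when it is torn
 * down; rbd_watch_cb() fires in between each time the header object
 * is notified.
 */
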
Alex Elder36be9a72013-01-19 00:30:28 -06002675/*
Alex Elderf40eb342013-04-25 15:09:42 -05002676 * Synchronous osd object method call. Returns the number of bytes
2677 * returned in the inbound buffer, or a negative error code.
Alex Elder36be9a72013-01-19 00:30:28 -06002678 */
2679static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2680 const char *object_name,
2681 const char *class_name,
2682 const char *method_name,
Alex Elder41579762013-04-21 12:14:45 -05002683 const void *outbound,
Alex Elder36be9a72013-01-19 00:30:28 -06002684 size_t outbound_size,
Alex Elder41579762013-04-21 12:14:45 -05002685 void *inbound,
Alex Eldere2a58ee2013-04-30 00:44:33 -05002686 size_t inbound_size)
Alex Elder36be9a72013-01-19 00:30:28 -06002687{
Alex Elder21692382013-04-05 01:27:12 -05002688 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elder36be9a72013-01-19 00:30:28 -06002689 struct rbd_obj_request *obj_request;
Alex Elder36be9a72013-01-19 00:30:28 -06002690 struct page **pages;
2691 u32 page_count;
2692 int ret;
2693
2694 /*
Alex Elder6010a452013-04-05 01:27:11 -05002695 * Method calls are ultimately read operations. The result
2696 * should placed into the inbound buffer provided. They
2697 * also supply outbound data--parameters for the object
2698 * method. Currently if this is present it will be a
2699 * snapshot id.
Alex Elder36be9a72013-01-19 00:30:28 -06002700 */
Alex Elder57385b52013-04-21 12:14:45 -05002701 page_count = (u32)calc_pages_for(0, inbound_size);
Alex Elder36be9a72013-01-19 00:30:28 -06002702 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2703 if (IS_ERR(pages))
2704 return PTR_ERR(pages);
2705
2706 ret = -ENOMEM;
Alex Elder6010a452013-04-05 01:27:11 -05002707 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
Alex Elder36be9a72013-01-19 00:30:28 -06002708 OBJ_REQUEST_PAGES);
2709 if (!obj_request)
2710 goto out;
2711
2712 obj_request->pages = pages;
2713 obj_request->page_count = page_count;
2714
Alex Elder430c28c2013-04-03 21:32:51 -05002715 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
Alex Elder36be9a72013-01-19 00:30:28 -06002716 if (!obj_request->osd_req)
2717 goto out;
2718
Alex Elderc99d2d42013-04-05 01:27:11 -05002719 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
Alex Elder04017e22013-04-05 14:46:02 -05002720 class_name, method_name);
2721 if (outbound_size) {
2722 struct ceph_pagelist *pagelist;
2723
2724 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2725 if (!pagelist)
2726 goto out;
2727
2728 ceph_pagelist_init(pagelist);
2729 ceph_pagelist_append(pagelist, outbound, outbound_size);
2730 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
2731 pagelist);
2732 }
Alex Eldera4ce40a2013-04-05 01:27:12 -05002733 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
2734 obj_request->pages, inbound_size,
Alex Elder44cd1882013-04-05 01:27:12 -05002735 0, false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05002736 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05002737
Alex Elder36be9a72013-01-19 00:30:28 -06002738 ret = rbd_obj_request_submit(osdc, obj_request);
2739 if (ret)
2740 goto out;
2741 ret = rbd_obj_request_wait(obj_request);
2742 if (ret)
2743 goto out;
2744
2745 ret = obj_request->result;
2746 if (ret < 0)
2747 goto out;
Alex Elder57385b52013-04-21 12:14:45 -05002748
2749 rbd_assert(obj_request->xferred < (u64)INT_MAX);
2750 ret = (int)obj_request->xferred;
Alex Elder903bb322013-02-06 13:11:38 -06002751 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
Alex Elder36be9a72013-01-19 00:30:28 -06002752out:
2753 if (obj_request)
2754 rbd_obj_request_put(obj_request);
2755 else
2756 ceph_release_page_vector(pages, page_count);
2757
2758 return ret;
2759}
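
/*
 * Illustrative sketch (not part of the driver): a typical caller of
 * rbd_obj_method_sync() packs the method parameters into an outbound
 * buffer and decodes the reply from the inbound one.  This hypothetical
 * helper mirrors what _rbd_dev_v2_snap_size() does further below.
 */
static int __maybe_unused rbd_example_get_size(struct rbd_device *rbd_dev,
					u64 snap_id, u64 *size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				&snapid, sizeof (snapid),
				&size_buf, sizeof (size_buf));
	if (ret < 0)
		return ret;		/* method call failed */
	if (ret < sizeof (size_buf))
		return -ERANGE;		/* short reply */

	*size = le64_to_cpu(size_buf.size);

	return 0;
}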

static void rbd_request_fn(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct rbd_device *rbd_dev = q->queuedata;
	bool read_only = rbd_dev->mapping.read_only;
	struct request *rq;
	int result;

	while ((rq = blk_fetch_request(q))) {
		bool write_request = rq_data_dir(rq) == WRITE;
		struct rbd_img_request *img_request;
		u64 offset;
		u64 length;

		/* Ignore any non-FS requests that filter through. */

		if (rq->cmd_type != REQ_TYPE_FS) {
			dout("%s: non-fs request type %d\n", __func__,
				(int) rq->cmd_type);
			__blk_end_request_all(rq, 0);
			continue;
		}

		/* Ignore/skip any zero-length requests */

		offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
		length = (u64) blk_rq_bytes(rq);

		if (!length) {
			dout("%s: zero-length request\n", __func__);
			__blk_end_request_all(rq, 0);
			continue;
		}

		spin_unlock_irq(q->queue_lock);

		/* Disallow writes to a read-only device */

		if (write_request) {
			result = -EROFS;
			if (read_only)
				goto end_request;
			rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
		}

		/*
		 * Quit early if the mapped snapshot no longer
		 * exists.  It's still possible the snapshot will
		 * have disappeared by the time our request arrives
		 * at the osd, but there's no sense in sending it if
		 * we already know.
		 */
		if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
			dout("request for non-existent snapshot");
			rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
			result = -ENXIO;
			goto end_request;
		}

		result = -EINVAL;
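		/*
		 * Worked example of the overflow check below (hypothetical
		 * numbers): with offset = U64_MAX - 511 and length = 1024,
		 * the byte range would wrap past 2^64.  Here
		 * U64_MAX - offset + 1 = 512, and length > 512, so the
		 * request is rejected.
		 */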
		if (offset && length > U64_MAX - offset + 1) {
			rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
				offset, length);
			goto end_request;	/* Shouldn't happen */
		}

		result = -ENOMEM;
		img_request = rbd_img_request_create(rbd_dev, offset, length,
							write_request, false);
		if (!img_request)
			goto end_request;

		img_request->rq = rq;

		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
						rq->bio);
		if (!result)
			result = rbd_img_request_submit(img_request);
		if (result)
			rbd_img_request_put(img_request);
end_request:
		spin_lock_irq(q->queue_lock);
		if (result < 0) {
			rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
				write_request ? "write" : "read",
				length, offset, result);

			__blk_end_request_all(rq, result);
		}
	}
}

/*
 * A queue callback.  Makes sure that we don't create a bio that spans
 * across multiple osd objects.  One exception would be single-page
 * bios, which we handle later in bio_chain_clone_range().
 */
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct rbd_device *rbd_dev = q->queuedata;
	sector_t sector_offset;
	sector_t sectors_per_obj;
	sector_t obj_sector_offset;
	int ret;

	/*
	 * Find how far into its rbd object the bio's start sector
	 * falls.  The bio's start sector is partition-relative, so
	 * first offset it to be relative to the enclosing device.
	 */
	sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
	sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
	obj_sector_offset = sector_offset & (sectors_per_obj - 1);

	/*
	 * Compute the number of bytes from that offset to the end
	 * of the object.  Account for what's already used by the bio.
	 */
	ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
	if (ret > bmd->bi_size)
		ret -= bmd->bi_size;
	else
		ret = 0;

	/*
	 * Don't send back more than was asked for.  And if the bio
	 * was empty, let the whole thing through because:  "Note
	 * that a block device *must* allow a single page to be
	 * added to an empty bio."
	 */
	rbd_assert(bvec->bv_len <= PAGE_SIZE);
	if (ret > (int) bvec->bv_len || !bmd->bi_size)
		ret = (int) bvec->bv_len;

	return ret;
}
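
/*
 * Worked example for rbd_merge_bvec() above, assuming the default
 * 4 MB objects (obj_order = 22, so sectors_per_obj = 1 << (22 - 9)
 * = 8192): for a bio starting at device sector 8000 that already
 * holds 100 KB (bi_size = 102400), obj_sector_offset = 8000 and the
 * object has (8192 - 8000) << 9 = 98304 bytes left.  That is less
 * than bi_size, so the function returns 0 and nothing more is merged.
 */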

static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

	rbd_dev->disk = NULL;
	if (disk->flags & GENHD_FL_UP) {
		del_gendisk(disk);
		if (disk->queue)
			blk_cleanup_queue(disk->queue);
	}
	put_disk(disk);
}

static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
				const char *object_name,
				u64 offset, u64 length, void *buf)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	page_count = (u32) calc_pages_for(offset, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, offset, length,
						OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
					offset, length, 0, 0);
	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
					obj_request->pages,
					obj_request->length,
					obj_request->offset & ~PAGE_MASK,
					false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
	size = (size_t) obj_request->xferred;
	ceph_copy_from_page_vector(pages, buf, 0, size);
	rbd_assert(size <= (size_t)INT_MAX);
	ret = (int)size;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
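
/*
 * Illustrative sketch (not part of the driver): reading the start of
 * an image's header object with rbd_obj_read_sync().  The v1 header
 * read loop below is the real user of this helper; the short-read
 * handling here is only an example.
 */
static int __maybe_unused rbd_example_peek_header(struct rbd_device *rbd_dev,
					void *buf, size_t len)
{
	int ret;

	ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name, 0, len, buf);
	if (ret < 0)
		return ret;		/* osd or request error */
	if ((size_t)ret < len)
		return -ENXIO;		/* short read */

	return 0;
}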

/*
 * Read the complete header for the given rbd device.
 *
 * Returns a pointer to a dynamically-allocated buffer containing
 * the complete and validated header, or a pointer-coded errno if
 * a failure occurs.
 */
static struct rbd_image_header_ondisk *
rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return ERR_PTR(-ENOMEM);

		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
					0, size, ondisk);
		if (ret < 0)
			goto out_err;
		if ((size_t)ret < size) {
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			ret = -ENXIO;
			goto out_err;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out_err;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	return ondisk;

out_err:
	kfree(ondisk);

	return ERR_PTR(ret);
}

/*
 * Reload the on-disk header.
 */
static int rbd_read_header(struct rbd_device *rbd_dev,
			   struct rbd_image_header *header)
{
	struct rbd_image_header_ondisk *ondisk;
	int ret;

	ondisk = rbd_dev_v1_header_read(rbd_dev);
	if (IS_ERR(ondisk))
		return PTR_ERR(ondisk);
	ret = rbd_header_from_disk(header, ondisk);
	kfree(ondisk);

	return ret;
}

static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
{
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		return;

	if (rbd_dev->mapping.size != rbd_dev->header.image_size) {
		sector_t size;

		rbd_dev->mapping.size = rbd_dev->header.image_size;
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
	}
}

/*
 * Only read the first part of the on-disk header, without the snaps info.
 */
static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
{
	int ret;
	struct rbd_image_header h;

	ret = rbd_read_header(rbd_dev, &h);
	if (ret < 0)
		return ret;

	down_write(&rbd_dev->header_rwsem);

	/* Update image size, and check for resize of mapped image */
	rbd_dev->header.image_size = h.image_size;
	rbd_update_mapping_size(rbd_dev);

	/* rbd_dev->header.object_prefix shouldn't change */
	kfree(rbd_dev->header.snap_sizes);
	kfree(rbd_dev->header.snap_names);
	/* osd requests may still refer to snapc */
	ceph_put_snap_context(rbd_dev->header.snapc);

	rbd_dev->header.snapc = h.snapc;
	rbd_dev->header.snap_names = h.snap_names;
	rbd_dev->header.snap_sizes = h.snap_sizes;
	/* Free the extra copy of the object prefix */
	if (strcmp(rbd_dev->header.object_prefix, h.object_prefix))
		rbd_warn(rbd_dev, "object prefix changed (ignoring)");
	kfree(h.object_prefix);

	up_write(&rbd_dev->header_rwsem);

	return ret;
}

/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}

static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 image_size;
	int ret;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	image_size = rbd_dev->header.image_size;
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_refresh(rbd_dev);
	else
		ret = rbd_dev_v2_refresh(rbd_dev);

	/* If it's a mapped snapshot, validate its EXISTS flag */

	rbd_exists_validate(rbd_dev);
	mutex_unlock(&ctl_mutex);
	if (ret)
		rbd_warn(rbd_dev, "got notification but failed to update snaps: %d\n",
			ret);
	if (image_size != rbd_dev->header.image_size)
		revalidate_disk(rbd_dev->disk);

	return ret;
}

static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	u64 segment_size;

	/* create gendisk info */
	disk = alloc_disk(RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = 0;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
	if (!q)
		goto out_disk;

	/* We use the default size, but let's be explicit about it. */
	blk_queue_physical_block_size(q, SECTOR_SIZE);

	/* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	blk_queue_merge_bvec(q, rbd_merge_bvec);
	disk->queue = q;

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_disk:
	put_disk(disk);

	return -ENOMEM;
}

/*
  sysfs
*/

static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		(unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
			ceph_client_id(rbd_dev->rbd_client->client));
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
			(unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

/*
 * For an rbd v2 image, shows the pool id, image id, and snapshot id
 * for the parent image.  If there is no parent, simply shows
 * "(no parent image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	struct rbd_spec *spec = rbd_dev->parent_spec;
	int count;
	char *bufp = buf;

	if (!spec)
		return sprintf(buf, "(no parent image)\n");

	count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
			(unsigned long long) spec->pool_id, spec->pool_name);
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
			spec->image_name ? spec->image_name : "(unknown)");
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
			(unsigned long long) spec->snap_id, spec->snap_name);
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
	if (count < 0)
		return count;
	bufp += count;

	return (ssize_t) (bufp - buf);
}

static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);

	return ret < 0 ? ret : size;
}

static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_sysfs_dev_release(struct device *dev)
{
}

static struct device_type rbd_device_type = {
	.name = "rbd",
	.groups = rbd_attr_groups,
	.release = rbd_sysfs_dev_release,
};

static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}

static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
				struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	rbd_dev->flags = 0;
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->spec = spec;
	rbd_dev->rbd_client = rbdc;

	/* Initialize the layout used for all rbd requests */

	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

	return rbd_dev;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev);
}

/*
 * Get the size and object order for an image snapshot; if snap_id is
 * CEPH_NOSNAP, get this information for the base image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				&snapid, sizeof (snapid),
				&size_buf, sizeof (size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order)
		*order = size_buf.order;
	*snap_size = le64_to_cpu(size_buf.size);

	dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
		(unsigned long long)snap_id, (unsigned int)*order,
		(unsigned long long)*snap_size);

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}

static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_object_prefix", NULL, 0,
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}

static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 incompat;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_features",
				&snapid, sizeof (snapid),
				&features_buf, sizeof (features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	incompat = le64_to_cpu(features_buf.incompat);
	if (incompat & ~RBD_FEATURES_SUPPORTED)
		return -ENXIO;

	*snap_features = le64_to_cpu(features_buf.features);

	dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}

static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	char *image_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(CEPH_NOSNAP);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_parent",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
	if (parent_spec->pool_id == CEPH_NOPOOL)
		goto out;	/* No parent?  No problem. */

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (parent_spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
			(unsigned long long)parent_spec->pool_id, U32_MAX);
		goto out_err;
	}

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	parent_spec->image_id = image_id;
	ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	rbd_dev->parent_overlap = overlap;
	rbd_dev->parent_spec = parent_spec;
	parent_spec = NULL;	/* rbd_dev now owns this */
out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}

static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_stripe_unit_count", NULL, 0,
				(char *)&striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}

static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				"rbd", "dir_get_name",
				image_id, image_id_size,
				reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}

static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}

static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u32 which;
	bool found = false;
	u64 snap_id;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name))
			break;
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}
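
/*
 * Illustrative sketch (not part of the driver): resolving a
 * user-supplied snapshot name with rbd_snap_id_by_name(), much as
 * rbd_dev_spec_update() does below.  CEPH_NOSNAP doubles as the
 * "not found" indicator.
 */
static int __maybe_unused rbd_example_lookup_snap(struct rbd_device *rbd_dev,
					const char *name, u64 *snap_id)
{
	u64 id = rbd_snap_id_by_name(rbd_dev, name);

	if (id == CEPH_NOSNAP)
		return -ENOENT;		/* no such snapshot (or error) */
	*snap_id = id;

	return 0;
}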

/*
 * When an rbd image has a parent image, it is identified by the
 * pool, image, and snapshot ids (not names).  This function fills
 * in the names for those ids.  (It's OK if we can't figure out the
 * name for an image id, but the pool and snapshot ids should always
 * exist and have names.)  All names in an rbd spec are dynamically
 * allocated.
 *
 * When an image being mapped (not a parent) is probed, we have the
 * pool name and pool id, image name and image id, and the snapshot
 * name.  The only thing we're missing is the snapshot id.
 */
static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	/*
	 * An image being mapped will have the pool name (etc.), but
	 * we need to look up the snapshot id.
	 */
	if (spec->pool_name) {
		if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
			u64 snap_id;

			snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
			if (snap_id == CEPH_NOSNAP)
				return -ENOENT;
			spec->snap_id = snap_id;
		} else {
			spec->snap_id = CEPH_NOSNAP;
		}

		return 0;
	}

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Look up the snapshot name, and make a copy */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (!snap_name) {
		ret = -ENOMEM;
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;
out_err:
	kfree(image_name);
	kfree(pool_name);

	return ret;
}

static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext", NULL, 0,
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				/ sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	rbd_dev->header.snapc = snapc;

	dout(" snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}

static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout(" snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}

static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev)
{
	int ret;

	down_write(&rbd_dev->header_rwsem);

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		goto out;
	rbd_update_mapping_size(rbd_dev);

	ret = rbd_dev_v2_snap_context(rbd_dev);
	dout("rbd_dev_v2_snap_context returned %d\n", ret);
out:
	up_write(&rbd_dev->header_rwsem);

	return ret;
}

static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
	struct device *dev;
	int ret;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	dev = &rbd_dev->dev;
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
	dev->release = rbd_dev_device_release;
	dev_set_name(dev, "%d", rbd_dev->dev_id);
	ret = device_register(dev);

	mutex_unlock(&ctl_mutex);

	return ret;
}

static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
	device_unregister(&rbd_dev->dev);
}
4056
Alex Eldere2839302012-08-29 17:11:06 -05004057static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
Alex Elder1ddbe942012-01-29 13:57:44 -06004058
4059/*
Alex Elder499afd52012-02-02 08:13:29 -06004060 * Get a unique rbd identifier for the given new rbd_dev, and add
4061 * the rbd_dev to the global list. The minimum rbd id is 1.
Alex Elder1ddbe942012-01-29 13:57:44 -06004062 */
Alex Eldere2839302012-08-29 17:11:06 -05004063static void rbd_dev_id_get(struct rbd_device *rbd_dev)
Alex Elderb7f23c32012-01-29 13:57:43 -06004064{
Alex Eldere2839302012-08-29 17:11:06 -05004065 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
Alex Elder499afd52012-02-02 08:13:29 -06004066
4067 spin_lock(&rbd_dev_list_lock);
4068 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4069 spin_unlock(&rbd_dev_list_lock);
Alex Eldere2839302012-08-29 17:11:06 -05004070 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4071 (unsigned long long) rbd_dev->dev_id);
Alex Elder1ddbe942012-01-29 13:57:44 -06004072}
Alex Elderb7f23c32012-01-29 13:57:43 -06004073
Alex Elder1ddbe942012-01-29 13:57:44 -06004074/*
Alex Elder499afd52012-02-02 08:13:29 -06004075 * Remove an rbd_dev from the global list, and record that its
4076 * identifier is no longer in use.
Alex Elder1ddbe942012-01-29 13:57:44 -06004077 */
Alex Eldere2839302012-08-29 17:11:06 -05004078static void rbd_dev_id_put(struct rbd_device *rbd_dev)
Alex Elder1ddbe942012-01-29 13:57:44 -06004079{
Alex Elderd184f6b2012-01-29 13:57:44 -06004080 struct list_head *tmp;
Alex Elderde71a292012-07-03 16:01:19 -05004081 int rbd_id = rbd_dev->dev_id;
Alex Elderd184f6b2012-01-29 13:57:44 -06004082 int max_id;
4083
Alex Elderaafb2302012-09-06 16:00:54 -05004084 rbd_assert(rbd_id > 0);
Alex Elder499afd52012-02-02 08:13:29 -06004085
Alex Eldere2839302012-08-29 17:11:06 -05004086 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4087 (unsigned long long) rbd_dev->dev_id);
Alex Elder499afd52012-02-02 08:13:29 -06004088 spin_lock(&rbd_dev_list_lock);
4089 list_del_init(&rbd_dev->node);
Alex Elderd184f6b2012-01-29 13:57:44 -06004090
4091 /*
4092 * If the id being "put" is not the current maximum, there
4093 * is nothing special we need to do.
4094 */
Alex Eldere2839302012-08-29 17:11:06 -05004095 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
Alex Elderd184f6b2012-01-29 13:57:44 -06004096 spin_unlock(&rbd_dev_list_lock);
4097 return;
4098 }
4099
4100 /*
4101 * We need to update the current maximum id. Search the
4102 * list to find out what it is. We're more likely to find
4103 * the maximum at the end, so search the list backward.
4104 */
4105 max_id = 0;
4106 list_for_each_prev(tmp, &rbd_dev_list) {
4107 struct rbd_device *rbd_dev;
4108
4109 rbd_dev = list_entry(tmp, struct rbd_device, node);
Alex Elderb213e0b2012-10-10 21:19:13 -07004110 if (rbd_dev->dev_id > max_id)
4111 max_id = rbd_dev->dev_id;
Alex Elderd184f6b2012-01-29 13:57:44 -06004112 }
Alex Elder499afd52012-02-02 08:13:29 -06004113 spin_unlock(&rbd_dev_list_lock);
Alex Elderb7f23c32012-01-29 13:57:43 -06004114
Alex Elder1ddbe942012-01-29 13:57:44 -06004115 /*
Alex Eldere2839302012-08-29 17:11:06 -05004116 * The max id could have been updated by rbd_dev_id_get(), in
Alex Elderd184f6b2012-01-29 13:57:44 -06004117 * which case it now accurately reflects the new maximum.
4118 * Be careful not to overwrite the maximum value in that
4119 * case.
Alex Elder1ddbe942012-01-29 13:57:44 -06004120 */
Alex Eldere2839302012-08-29 17:11:06 -05004121 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4122 dout(" max dev id has been reset\n");
Alex Elderb7f23c32012-01-29 13:57:43 -06004123}
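
/*
 * Worked example of the id scheme above: with devices 1, 2 and 3
 * mapped, rbd_dev_id_max is 3.  Putting id 2 changes nothing (ids
 * are never reused while a larger one is live), but putting id 3
 * rescans the list, finds 2 as the new maximum, and resets
 * rbd_dev_id_max via cmpxchg, so the next rbd_dev_id_get() hands
 * out 3 again.
 */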

/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}

/*
 * Finds the next token in *buf, and if the provided token buffer is
 * big enough, copies the found token into it.  The result, if
 * copied, is guaranteed to be terminated with '\0'.  Note that *buf
 * must be terminated with '\0' on entry.
 *
 * Returns the length of the token found (not including the '\0').
 * Return value will be 0 if no token is found, and it will be >=
 * token_size if the token would not fit.
 *
 * The *buf pointer will be updated to point beyond the end of the
 * found token.  Note that this occurs even if the token buffer is
 * too small to hold it.
 */
static inline size_t copy_token(const char **buf,
				char *token,
				size_t token_size)
{
	size_t len;

	len = next_token(buf);
	if (len < token_size) {
		memcpy(token, *buf, len);
		*(token + len) = '\0';
	}
	*buf += len;

	return len;
}

/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
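
/*
 * A sketch (hypothetical input, for illustration only) of how the
 * helpers above cooperate when walking an add request:
 *
 *	const char *buf = "  1.2.3.4:6789 name=admin rbd foo";
 *
 *	len = next_token(&buf);		skips the leading spaces, leaves
 *					buf at "1.2.3.4:..." and returns
 *					12, without consuming the token
 *	buf += len;			caller consumes the monitor list
 *	opts = dup_token(&buf, NULL);	returns a kmalloc'd "name=admin"
 *					and advances buf past it
 *
 * This is the pattern rbd_add_parse_args() follows below.
 */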

/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters, which return dynamically-allocated
 * structures:
 *  ceph_opts
 *	The address of a pointer that will refer to a ceph options
 *	structure.  Caller must release the returned pointer using
 *	ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * where:
 *  <mon_addrs>
 *	A comma-separated list of one or more monitor addresses.
 *	A monitor address is an ip address, optionally followed
 *	by a port number (separated by a colon).
 *	I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *	A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *	The name of the rados pool containing the rbd image.
 *  <image_name>
 *	The name of the image in that pool to map.
 *  <snap_name>
 *	The name of an optional snapshot.  If provided, the mapping
 *	will present data from the image as of the time that snapshot
 *	was created.  The image head is used if no snapshot name is
 *	provided.  Snapshot mappings are always read-only.
 */
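
/*
 * For example (all names and values here are illustrative only):
 *
 *	$ echo "1.2.3.4:6789 name=admin,secret=AQB... rbd foo" \
 *		> /sys/bus/rbd/add
 *
 * would map image "foo" from pool "rbd" at its head, using the
 * monitor at 1.2.3.4:6789 and authenticating as client "admin".
 */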
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}

/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
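
/*
 * Concretely: for an image the user created as "foo", the id object
 * is named RBD_ID_PREFIX followed by the image name (e.g. something
 * like "rbd_id.foo", depending on how rbd_types.h defines the
 * prefix), and its "get_id" class method returns the persistent id
 * as a ceph-encoded string, e.g. "1028aa3bb379".  Every other
 * per-image object is subsequently named using that id rather than
 * the user-visible name.
 */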

/* Undo whatever state changes are made by v1 or v2 image probe */

static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
{
	int ret;

	/* Populate rbd image metadata */

	ret = rbd_read_header(rbd_dev, &rbd_dev->header);
	if (ret < 0)
		goto out_err;

	/* Version 1 images have no parent (no layering) */

	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;

	dout("discovered version 1 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;

out_err:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	return ret;
}

static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		goto out_err;

	/* Get the object prefix (a.k.a. block_name) for the image */

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/* Get and check the features for the image */

	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports layering, get the parent info */

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out_err;

		/*
		 * Don't print a warning for parent images.  We can
		 * tell at this point because we won't know its pool
		 * name yet (just its pool id).
		 */
		if (rbd_dev->spec->pool_name)
			rbd_warn(rbd_dev, "WARNING: kernel layering "
					"is EXPERIMENTAL!");
	}

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	/* crypto and compression type aren't (yet) supported for v2 images */

	rbd_dev->header.crypt_type = 0;
	rbd_dev->header.comp_type = 0;

	/* Get the snapshot context */

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret)
		goto out_err;

	dout("discovered version 2 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;
out_err:
	rbd_dev->parent_overlap = 0;
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}

static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;

	return 0;
out_err:
	if (parent) {
		rbd_spec_put(rbd_dev->parent_spec);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}

static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		return ret;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */

	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_disk;

	/* Everything's ready.  Announce the disk to the world. */

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}
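
/*
 * Once this returns success the mapping is visible as a block
 * device named after RBD_DRV_NAME plus the dev id (e.g. /dev/rbd1
 * for dev id 1), with a matching sysfs entry registered as
 * /sys/bus/rbd/devices/<id> by rbd_bus_add_dev() above.
 */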

static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);
	return 0;
}
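
/*
 * So a format 1 image named "foo" gets a header object called
 * "foo" RBD_SUFFIX, while a format 2 image with id "1028aa3bb379"
 * gets RBD_HEADER_PREFIX "1028aa3bb379".  (The customary values of
 * those macros are ".rbd" and "rbd_header." respectively, but they
 * come from rbd_types.h, which is not shown here.)
 */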

static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	int ret;

	rbd_dev_unprobe(rbd_dev);
	ret = rbd_dev_header_watch_sync(rbd_dev, 0);
	if (ret)
		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  For format 2 images this includes determining the image
 * id.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
{
	int ret;
	int tmp;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	ret = rbd_dev_header_watch_sync(rbd_dev, 1);
	if (ret)
		goto out_header_name;

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_probe(rbd_dev);
	else
		ret = rbd_dev_v2_probe(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (!ret)
		return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	tmp = rbd_dev_header_watch_sync(rbd_dev, 0);
	if (tmp)
		rbd_warn(rbd_dev, "unable to tear down watch request\n");
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);

	return ret;
}

static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
	ceph_opts = NULL;	/* rbd_dev client now owns this */

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rbd_dev->mapping.read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rc = rbd_dev_image_probe(rbd_dev);
	if (rc < 0)
		goto err_out_rbd_dev;

	rc = rbd_dev_device_setup(rbd_dev);
	if (!rc)
		return count;

	rbd_dev_image_release(rbd_dev);	/* also destroys rbd_dev */
	goto err_out_module;
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	kfree(rbd_opts);
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}

static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);
	return NULL;
}

static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_clear_mapping(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev->major = 0;
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
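
/*
 * In other words, for a clone chain mapped as
 *
 *	rbd_dev -> parent -> grandparent
 *
 * each pass of the outer loop releases the deepest remaining
 * ancestor: the grandparent goes first (detached from the parent),
 * then the parent (detached from rbd_dev), until rbd_dev has no
 * parent left.
 */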

static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id;
	unsigned long ul;
	int ret;

	ret = strict_strtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev) {
		ret = -ENOENT;
		goto done;
	}

	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;
	ret = count;
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);
done:
	mutex_unlock(&ctl_mutex);

	return ret;
}
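
/*
 * Removal mirrors the add example above: writing a mapped device's
 * id (for instance "1") to /sys/bus/rbd/remove unmaps it, provided
 * the block device is not open anywhere; otherwise -EBUSY is
 * returned.
 */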

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");

		return -EINVAL;
	}
	rc = rbd_sysfs_init();
	if (rc)
		return rc;
	pr_info("loaded " RBD_DRV_NAME_LONG "\n");
	return 0;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");