
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING. If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector. It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes. These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter. Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}

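/*
 * Illustrative usage sketch, not part of the original source: the
 * pair above implements a saturating counter for counts that must
 * never wrap, such as the parent_ref count of a layered image. A
 * hypothetical caller pins and unpins a parent like so:
 *
 *	if (atomic_inc_return_safe(&rbd_dev->parent_ref) < 0)
 *		return;		counter was 0, or pinned at INT_MAX
 *	...use the parent image...
 *	if (atomic_dec_return_safe(&rbd_dev->parent_ref) < 0)
 *		rbd_warn(rbd_dev, "parent ref underflow");
 */
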
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)

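/*
 * Worked example, added for clarity: an n-byte int has at most
 * ceil(8n * log10(2)) ~= 2.41n decimal digits, so (5n)/2 + 1 always
 * leaves room for every digit plus a sign. With 4-byte ints that is
 * (5 * 4) / 2 + 1 = 11, covering the 10 digits of 2147483647 and a
 * leading '-'; DEV_NAME_LEN (32) therefore easily holds "rbd"
 * followed by any formatted device id.
 */
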
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image. Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name. For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up. For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image. This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * An instance of the client; multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer. The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn in image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

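/*
 * Illustrative sketch, not from the original source: image request
 * submission walks the object request list with the helpers above,
 * roughly:
 *
 *	struct rbd_obj_request *obj_request;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		(void) rbd_img_obj_request_submit(obj_request);
 *
 * The _safe variant iterates in reverse and tolerates removal of the
 * current entry, which suits teardown paths.
 */
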
struct rbd_mapping {
	u64                     size;
	u64                     features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags. If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}

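/*
 * Worked example, added for clarity: with RBD_SINGLE_MAJOR_PART_SHIFT
 * of 4, each device owns a window of 16 minors, so dev_id 2 maps to
 * minor 32 and minors 33..47 are left for its partitions;
 * minor_to_rbd_dev_id() maps any of 32..47 back to 2.
 */
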
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}

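/*
 * Illustrative userspace view, not part of the driver: the handler
 * above backs the BLKROSET ioctl, which is what a command such as
 * "blockdev --setro /dev/rbd0" ends up issuing:
 *
 *	int ro = 1;
 *	ioctl(fd, BLKROSET, &ro);
 *
 * The call fails with -EBUSY while another opener exists, and with
 * -EROFS when asked to make a mapped snapshot writable.
 */
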
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance. Success or not, this function
 * consumes ceph_opts. Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration. If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	bool	read_only;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}

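/*
 * Illustrative example, not from the original source: for a map
 * request whose option string contains "queue_depth=128,ro", libceph
 * hands each comma-separated token it does not itself recognize to
 * parse_rbd_opts_token(), so "queue_depth=128" lands in the
 * Opt_queue_depth arm and sets rbd_opts->queue_depth = 128, while
 * "ro" sets rbd_opts->read_only = true.
 */
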
static char *obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it. Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * Takes rbd_client_list_lock to unlink the client, so the caller
 * must not already hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

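/*
 * Worked example, added for clarity: the two-step bound above first
 * computes size = SIZE_MAX - sizeof(struct ceph_snap_context) and
 * rejects headers whose snap_count * sizeof(__le64) id array would
 * exceed it, then subtracts the id array and rejects headers whose
 * NUL-separated snapshot-name blob (snap_names_len bytes) would not
 * fit in the remainder, so later size_t allocations cannot overflow.
 */
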
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

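/*
 * Illustrative layout, not from the original source: for a format 1
 * image, header->snap_names is a packed block of NUL-terminated
 * strings in the same order as the snapshot context's id array,
 * e.g. "monday\0tuesday\0wednesday\0", so the lookup above reaches
 * entry 2 by skipping strlen() + 1 bytes twice.
 */
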
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

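/*
 * Worked example, added for clarity: since the osd keeps snapshot
 * ids sorted highest-first, e.g. snapc->snaps = { 40, 25, 10 }, the
 * reversed comparator lets bsearch() work unchanged:
 * rbd_dev_snap_index() returns 1 for snap_id 25, and BAD_SNAP_INDEX
 * for an absent id such as 30.
 */
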
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}

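/*
 * Worked example, added for clarity (the object prefix below is made
 * up): with the default object order of 22 (4 MB objects), an image
 * byte offset of 0xc12345 lies in segment 0xc12345 >> 22 = 3, so a
 * format 2 image with prefix "rbd_data.1014b2ae8944a" yields the
 * object name "rbd_data.1014b2ae8944a.0000000000000003".
 * rbd_segment_offset() returns 0xc12345 & 0x3fffff = 0x12345, and a
 * 6 MB request starting there is clipped by rbd_segment_length() to
 * the 0x400000 - 0x12345 = 0x3edcbb bytes left in that object.
 */
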
1223/*
Josh Durgin029bcbd2011-07-22 11:35:23 -07001224 * returns the size of an object in the image
1225 */
1226static u64 rbd_obj_bytes(struct rbd_image_header *header)
1227{
1228 return 1 << header->obj_order;
1229}
1230
1231/*
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001232 * bio helpers
1233 */
1234
1235static void bio_chain_put(struct bio *chain)
1236{
1237 struct bio *tmp;
1238
1239 while (chain) {
1240 tmp = chain;
1241 chain = chain->bi_next;
1242 bio_put(tmp);
1243 }
1244}
1245
1246/*
1247 * zeros a bio chain, starting at specific offset
1248 */
1249static void zero_bio_chain(struct bio *chain, int start_ofs)
1250{
Kent Overstreet79886132013-11-23 17:19:00 -08001251 struct bio_vec bv;
1252 struct bvec_iter iter;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001253 unsigned long flags;
1254 void *buf;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001255 int pos = 0;
1256
1257 while (chain) {
Kent Overstreet79886132013-11-23 17:19:00 -08001258 bio_for_each_segment(bv, chain, iter) {
1259 if (pos + bv.bv_len > start_ofs) {
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001260 int remainder = max(start_ofs - pos, 0);
Kent Overstreet79886132013-11-23 17:19:00 -08001261 buf = bvec_kmap_irq(&bv, &flags);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001262 memset(buf + remainder, 0,
Kent Overstreet79886132013-11-23 17:19:00 -08001263 bv.bv_len - remainder);
1264 flush_dcache_page(bv.bv_page);
Dan Carpenter85b5aaa2010-10-11 21:15:11 +02001265 bvec_kunmap_irq(buf, &flags);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001266 }
Kent Overstreet79886132013-11-23 17:19:00 -08001267 pos += bv.bv_len;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001268 }
1269
1270 chain = chain->bi_next;
1271 }
1272}
1273
1274/*
Alex Elderb9434c52013-04-19 15:34:50 -05001275 * similar to zero_bio_chain(), zeros data defined by a page array,
1276 * starting at the given byte offset from the start of the array and
1277 * continuing up to the given end offset. The pages array is
1278 * assumed to be big enough to hold all bytes up to the end.
1279 */
1280static void zero_pages(struct page **pages, u64 offset, u64 end)
1281{
1282 struct page **page = &pages[offset >> PAGE_SHIFT];
1283
1284 rbd_assert(end > offset);
1285 rbd_assert(end - offset <= (u64)SIZE_MAX);
1286 while (offset < end) {
1287 size_t page_offset;
1288 size_t length;
1289 unsigned long flags;
1290 void *kaddr;
1291
Geert Uytterhoeven491205a2013-05-13 20:35:37 -05001292 page_offset = offset & ~PAGE_MASK;
1293 length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
Alex Elderb9434c52013-04-19 15:34:50 -05001294 local_irq_save(flags);
1295 kaddr = kmap_atomic(*page);
1296 memset(kaddr + page_offset, 0, length);
Alex Eldere2156052013-05-22 20:54:25 -05001297 flush_dcache_page(*page);
Alex Elderb9434c52013-04-19 15:34:50 -05001298 kunmap_atomic(kaddr);
1299 local_irq_restore(flags);
1300
1301 offset += length;
1302 page++;
1303 }
1304}
1305
1306/*
Alex Elderf7760da2012-10-20 22:17:27 -05001307 * Clone a portion of a bio, starting at the given byte offset
1308 * and continuing for the number of bytes indicated.
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001309 */
Alex Elderf7760da2012-10-20 22:17:27 -05001310static struct bio *bio_clone_range(struct bio *bio_src,
1311 unsigned int offset,
1312 unsigned int len,
1313 gfp_t gfpmask)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001314{
Alex Elderf7760da2012-10-20 22:17:27 -05001315 struct bio *bio;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001316
Kent Overstreet5341a622013-08-07 14:31:11 -07001317 bio = bio_clone(bio_src, gfpmask);
Alex Elderf7760da2012-10-20 22:17:27 -05001318 if (!bio)
1319 return NULL; /* ENOMEM */
1320
Kent Overstreet5341a622013-08-07 14:31:11 -07001321 bio_advance(bio, offset);
Kent Overstreet4f024f32013-10-11 15:44:27 -07001322 bio->bi_iter.bi_size = len;
Alex Elder542582f2012-08-09 10:33:25 -07001323
Alex Elderf7760da2012-10-20 22:17:27 -05001324 return bio;
1325}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001326
Alex Elderf7760da2012-10-20 22:17:27 -05001327/*
1328 * Clone a portion of a bio chain, starting at the given byte offset
1329 * into the first bio in the source chain and continuing for the
1330 * number of bytes indicated. The result is another bio chain of
1331 * exactly the given length, or a null pointer on error.
1332 *
1333 * The bio_src and offset parameters are both in-out. On entry they
1334 * refer to the first source bio and the offset into that bio where
1335 * the start of data to be cloned is located.
1336 *
1337 * On return, bio_src is updated to refer to the bio in the source
1338 * chain that contains the first un-cloned byte, and *offset will
1339 * contain the offset of that byte within that bio.
1340 */
1341static struct bio *bio_chain_clone_range(struct bio **bio_src,
1342 unsigned int *offset,
1343 unsigned int len,
1344 gfp_t gfpmask)
1345{
1346 struct bio *bi = *bio_src;
1347 unsigned int off = *offset;
1348 struct bio *chain = NULL;
1349 struct bio **end;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001350
Alex Elderf7760da2012-10-20 22:17:27 -05001351	/* Build up a chain of clone bios covering the requested length */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001352
Kent Overstreet4f024f32013-10-11 15:44:27 -07001353 if (!bi || off >= bi->bi_iter.bi_size || !len)
Alex Elderf7760da2012-10-20 22:17:27 -05001354 return NULL; /* Nothing to clone */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001355
Alex Elderf7760da2012-10-20 22:17:27 -05001356 end = &chain;
1357 while (len) {
1358 unsigned int bi_size;
1359 struct bio *bio;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001360
Alex Elderf5400b72012-11-01 10:17:15 -05001361 if (!bi) {
1362 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
Alex Elderf7760da2012-10-20 22:17:27 -05001363 goto out_err; /* EINVAL; ran out of bio's */
Alex Elderf5400b72012-11-01 10:17:15 -05001364 }
Kent Overstreet4f024f32013-10-11 15:44:27 -07001365 bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
Alex Elderf7760da2012-10-20 22:17:27 -05001366 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1367 if (!bio)
1368 goto out_err; /* ENOMEM */
1369
1370 *end = bio;
1371 end = &bio->bi_next;
1372
1373 off += bi_size;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001374 if (off == bi->bi_iter.bi_size) {
Alex Elderf7760da2012-10-20 22:17:27 -05001375 bi = bi->bi_next;
1376 off = 0;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001377 }
Alex Elderf7760da2012-10-20 22:17:27 -05001378 len -= bi_size;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001379 }
Alex Elderf7760da2012-10-20 22:17:27 -05001380 *bio_src = bi;
1381 *offset = off;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001382
Alex Elderf7760da2012-10-20 22:17:27 -05001383 return chain;
1384out_err:
1385 bio_chain_put(chain);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001386
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001387 return NULL;
1388}
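
/*
 * Hedged usage sketch (illustrative only, not from the original
 * driver; src_chain, piece_len and remaining are hypothetical
 * names): a caller carving fixed-size pieces off one source chain
 * advances through it via the in-out parameters:
 *
 *	struct bio *bi = src_chain;
 *	unsigned int off = 0;
 *
 *	while (remaining) {
 *		struct bio *clone;
 *
 *		clone = bio_chain_clone_range(&bi, &off, piece_len,
 *					      GFP_NOIO);
 *		if (!clone)
 *			break;		(ENOMEM, or chain exhausted)
 *		... hand clone to the consumer ...
 *		remaining -= piece_len;
 *	}
 */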
1389
Alex Elder926f9b32013-02-11 12:33:24 -06001390/*
1391 * The default/initial value for all object request flags is 0. For
1392 * each flag, once its value is set to 1 it is never reset to 0
1393 * again.
1394 */
Alex Elder6365d332013-02-11 12:33:24 -06001395static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1396{
1397 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
Alex Elder6365d332013-02-11 12:33:24 -06001398 struct rbd_device *rbd_dev;
1399
Alex Elder57acbaa2013-02-11 12:33:24 -06001400 rbd_dev = obj_request->img_request->rbd_dev;
Ilya Dryomov9584d502014-07-11 12:11:20 +04001401 rbd_warn(rbd_dev, "obj_request %p already marked img_data",
Alex Elder6365d332013-02-11 12:33:24 -06001402 obj_request);
1403 }
1404}
1405
1406static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1407{
1408 smp_mb();
1409 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1410}
1411
Alex Elder57acbaa2013-02-11 12:33:24 -06001412static void obj_request_done_set(struct rbd_obj_request *obj_request)
1413{
1414 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1415 struct rbd_device *rbd_dev = NULL;
1416
1417 if (obj_request_img_data_test(obj_request))
1418 rbd_dev = obj_request->img_request->rbd_dev;
Ilya Dryomov9584d502014-07-11 12:11:20 +04001419 rbd_warn(rbd_dev, "obj_request %p already marked done",
Alex Elder57acbaa2013-02-11 12:33:24 -06001420 obj_request);
1421 }
1422}
1423
1424static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1425{
1426 smp_mb();
1427 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1428}
1429
Alex Elder5679c592013-02-11 12:33:24 -06001430/*
1431 * This sets the KNOWN flag after (possibly) setting the EXISTS
1432 * flag. The latter is set based on the "exists" value provided.
1433 *
1434 * Note that for our purposes once an object exists it never goes
1435 * away again. It's possible that the responses to two existence
1436 * checks are separated by the creation of the target object, and
1437 * the first ("doesn't exist") response arrives *after* the second
1438 * ("does exist"). In that case we ignore the second one.
1439 */
1440static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1441 bool exists)
1442{
1443 if (exists)
1444 set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1445 set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1446 smp_mb();
1447}
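
/*
 * Concrete instance of the race described above (hypothetical
 * timeline): stat A is sent, the target object is then created by
 * a copyup, stat B is sent and its "does exist" reply arrives, and
 * only afterwards does A's stale "doesn't exist" reply come back.
 * Because B already set OBJ_REQ_EXISTS and the bit is never
 * cleared, obj_request_existence_set(obj_request, false) for A
 * sets only the KNOWN bit and the stale answer has no effect.
 */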
1448
1449static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1450{
1451 smp_mb();
1452 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1453}
1454
1455static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1456{
1457 smp_mb();
1458 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1459}
1460
Ilya Dryomov96385562014-06-10 13:53:29 +04001461static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
1462{
1463 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1464
1465 return obj_request->img_offset <
1466 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
1467}
1468
Alex Elderbf0d5f502012-11-22 00:00:08 -06001469static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1470{
Alex Elder37206ee2013-02-20 17:32:08 -06001471 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1472 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001473 kref_get(&obj_request->kref);
1474}
1475
1476static void rbd_obj_request_destroy(struct kref *kref);
1477static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1478{
1479 rbd_assert(obj_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001480 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1481 atomic_read(&obj_request->kref.refcount));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001482 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1483}
1484
Alex Elder0f2d5be2014-04-26 14:21:44 +04001485static void rbd_img_request_get(struct rbd_img_request *img_request)
1486{
1487 dout("%s: img %p (was %d)\n", __func__, img_request,
1488 atomic_read(&img_request->kref.refcount));
1489 kref_get(&img_request->kref);
1490}
1491
Alex Eldere93f3152013-05-08 22:50:04 -05001492static bool img_request_child_test(struct rbd_img_request *img_request);
1493static void rbd_parent_request_destroy(struct kref *kref);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001494static void rbd_img_request_destroy(struct kref *kref);
1495static void rbd_img_request_put(struct rbd_img_request *img_request)
1496{
1497 rbd_assert(img_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001498 dout("%s: img %p (was %d)\n", __func__, img_request,
1499 atomic_read(&img_request->kref.refcount));
Alex Eldere93f3152013-05-08 22:50:04 -05001500 if (img_request_child_test(img_request))
1501 kref_put(&img_request->kref, rbd_parent_request_destroy);
1502 else
1503 kref_put(&img_request->kref, rbd_img_request_destroy);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001504}
1505
1506static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1507 struct rbd_obj_request *obj_request)
1508{
Alex Elder25dcf952013-01-25 17:08:55 -06001509 rbd_assert(obj_request->img_request == NULL);
1510
Alex Elderb155e862013-04-15 14:50:37 -05001511 /* Image request now owns object's original reference */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001512 obj_request->img_request = img_request;
Alex Elder25dcf952013-01-25 17:08:55 -06001513 obj_request->which = img_request->obj_request_count;
Alex Elder6365d332013-02-11 12:33:24 -06001514 rbd_assert(!obj_request_img_data_test(obj_request));
1515 obj_request_img_data_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001516 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001517 img_request->obj_request_count++;
1518 list_add_tail(&obj_request->links, &img_request->obj_requests);
Alex Elder37206ee2013-02-20 17:32:08 -06001519 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1520 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001521}
1522
1523static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1524 struct rbd_obj_request *obj_request)
1525{
1526 rbd_assert(obj_request->which != BAD_WHICH);
Alex Elder25dcf952013-01-25 17:08:55 -06001527
Alex Elder37206ee2013-02-20 17:32:08 -06001528 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1529 obj_request->which);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001530 list_del(&obj_request->links);
Alex Elder25dcf952013-01-25 17:08:55 -06001531 rbd_assert(img_request->obj_request_count > 0);
1532 img_request->obj_request_count--;
1533 rbd_assert(obj_request->which == img_request->obj_request_count);
1534 obj_request->which = BAD_WHICH;
Alex Elder6365d332013-02-11 12:33:24 -06001535 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001536 rbd_assert(obj_request->img_request == img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001537 obj_request->img_request = NULL;
Alex Elder25dcf952013-01-25 17:08:55 -06001538 obj_request->callback = NULL;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001539 rbd_obj_request_put(obj_request);
1540}
1541
1542static bool obj_request_type_valid(enum obj_request_type type)
1543{
1544 switch (type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06001545 case OBJ_REQUEST_NODATA:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001546 case OBJ_REQUEST_BIO:
Alex Elder788e2df2013-01-17 12:25:27 -06001547 case OBJ_REQUEST_PAGES:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001548 return true;
1549 default:
1550 return false;
1551 }
1552}
1553
Alex Elderbf0d5f502012-11-22 00:00:08 -06001554static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1555 struct rbd_obj_request *obj_request)
1556{
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001557 dout("%s %p\n", __func__, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001558 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1559}
1560
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001561static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
1562{
1563 dout("%s %p\n", __func__, obj_request);
1564 ceph_osdc_cancel_request(obj_request->osd_req);
1565}
1566
1567/*
1568 * Wait for an object request to complete. If interrupted, cancel the
1569 * underlying osd request.
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001570 *
1571 * @timeout: in jiffies, 0 means "wait forever"
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001572 */
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001573static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
1574 unsigned long timeout)
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001575{
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001576 long ret;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001577
1578 dout("%s %p\n", __func__, obj_request);
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001579 ret = wait_for_completion_interruptible_timeout(
1580 &obj_request->completion,
1581 ceph_timeout_jiffies(timeout));
1582 if (ret <= 0) {
1583 if (ret == 0)
1584 ret = -ETIMEDOUT;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001585 rbd_obj_request_end(obj_request);
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001586 } else {
1587 ret = 0;
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001588 }
1589
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03001590 dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
1591 return ret;
1592}
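
/*
 * Note: ceph_timeout_jiffies() treats a timeout of 0 as
 * MAX_SCHEDULE_TIMEOUT, which is how "wait forever" above is
 * implemented.  A timer expiry returns -ETIMEDOUT and a signal
 * returns -ERESTARTSYS; in both cases the osd request is
 * cancelled before the error is returned.
 */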
1593
1594static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1595{
1596 return __rbd_obj_request_wait(obj_request, 0);
1597}
1598
1599static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
1600 unsigned long timeout)
1601{
1602 return __rbd_obj_request_wait(obj_request, timeout);
Ilya Dryomov71c20a02014-06-19 11:38:14 +04001603}
1604
Alex Elderbf0d5f502012-11-22 00:00:08 -06001605static void rbd_img_request_complete(struct rbd_img_request *img_request)
1606{
Alex Elder55f27e02013-04-10 12:34:25 -05001607
Alex Elder37206ee2013-02-20 17:32:08 -06001608 dout("%s: img %p\n", __func__, img_request);
Alex Elder55f27e02013-04-10 12:34:25 -05001609
1610 /*
1611 * If no error occurred, compute the aggregate transfer
1612 * count for the image request. We could instead use
1613 * atomic64_cmpxchg() to update it as each object request
1614 * completes; it's not clear offhand which way is better.
1615 */
1616 if (!img_request->result) {
1617 struct rbd_obj_request *obj_request;
1618 u64 xferred = 0;
1619
1620 for_each_obj_request(img_request, obj_request)
1621 xferred += obj_request->xferred;
1622 img_request->xferred = xferred;
1623 }
1624
Alex Elderbf0d5f502012-11-22 00:00:08 -06001625 if (img_request->callback)
1626 img_request->callback(img_request);
1627 else
1628 rbd_img_request_put(img_request);
1629}
1630
Alex Elder0c425242013-02-08 09:55:49 -06001631/*
1632 * The default/initial value for all image request flags is 0. Each
1633 * is conditionally set to 1 at image request initialization time
1634 * and currently never changes thereafter.
1635 */
1636static void img_request_write_set(struct rbd_img_request *img_request)
1637{
1638 set_bit(IMG_REQ_WRITE, &img_request->flags);
1639 smp_mb();
1640}
1641
1642static bool img_request_write_test(struct rbd_img_request *img_request)
1643{
1644 smp_mb();
1645 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1646}
1647
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001648/*
1649 * Set the discard flag when the img_request is a discard request
1650 */
1651static void img_request_discard_set(struct rbd_img_request *img_request)
1652{
1653 set_bit(IMG_REQ_DISCARD, &img_request->flags);
1654 smp_mb();
1655}
1656
1657static bool img_request_discard_test(struct rbd_img_request *img_request)
1658{
1659 smp_mb();
1660 return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
1661}
1662
Alex Elder9849e982013-01-24 16:13:36 -06001663static void img_request_child_set(struct rbd_img_request *img_request)
1664{
1665 set_bit(IMG_REQ_CHILD, &img_request->flags);
1666 smp_mb();
1667}
1668
Alex Eldere93f3152013-05-08 22:50:04 -05001669static void img_request_child_clear(struct rbd_img_request *img_request)
1670{
1671 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1672 smp_mb();
1673}
1674
Alex Elder9849e982013-01-24 16:13:36 -06001675static bool img_request_child_test(struct rbd_img_request *img_request)
1676{
1677 smp_mb();
1678 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1679}
1680
Alex Elderd0b2e942013-01-24 16:13:36 -06001681static void img_request_layered_set(struct rbd_img_request *img_request)
1682{
1683 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1684 smp_mb();
1685}
1686
Alex Eldera2acd002013-05-08 22:50:04 -05001687static void img_request_layered_clear(struct rbd_img_request *img_request)
1688{
1689 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1690 smp_mb();
1691}
1692
Alex Elderd0b2e942013-01-24 16:13:36 -06001693static bool img_request_layered_test(struct rbd_img_request *img_request)
1694{
1695 smp_mb();
1696 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1697}
1698
Josh Durgin3b434a2a2014-04-04 17:32:15 -07001699static enum obj_operation_type
1700rbd_img_request_op_type(struct rbd_img_request *img_request)
1701{
1702 if (img_request_write_test(img_request))
1703 return OBJ_OP_WRITE;
1704 else if (img_request_discard_test(img_request))
1705 return OBJ_OP_DISCARD;
1706 else
1707 return OBJ_OP_READ;
1708}
1709
Alex Elder6e2a4502013-03-27 09:16:30 -05001710static void
1711rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1712{
Alex Elderb9434c52013-04-19 15:34:50 -05001713 u64 xferred = obj_request->xferred;
1714 u64 length = obj_request->length;
1715
Alex Elder6e2a4502013-03-27 09:16:30 -05001716 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1717 obj_request, obj_request->img_request, obj_request->result,
Alex Elderb9434c52013-04-19 15:34:50 -05001718 xferred, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001719 /*
Josh Durgin17c1cc12013-08-26 17:55:38 -07001720 * ENOENT means a hole in the image. We zero-fill the entire
1721 * length of the request. A short read also implies zero-fill
1722 * to the end of the request. An error requires the whole
1723 * length of the request to be reported finished with an error
1724 * to the block layer. In each case we update the xferred
1725 * count to indicate the whole request was satisfied.
Alex Elder6e2a4502013-03-27 09:16:30 -05001726 */
Alex Elderb9434c52013-04-19 15:34:50 -05001727 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
Alex Elder6e2a4502013-03-27 09:16:30 -05001728 if (obj_request->result == -ENOENT) {
Alex Elderb9434c52013-04-19 15:34:50 -05001729 if (obj_request->type == OBJ_REQUEST_BIO)
1730 zero_bio_chain(obj_request->bio_list, 0);
1731 else
1732 zero_pages(obj_request->pages, 0, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001733 obj_request->result = 0;
Alex Elderb9434c52013-04-19 15:34:50 -05001734 } else if (xferred < length && !obj_request->result) {
1735 if (obj_request->type == OBJ_REQUEST_BIO)
1736 zero_bio_chain(obj_request->bio_list, xferred);
1737 else
1738 zero_pages(obj_request->pages, xferred, length);
Alex Elder6e2a4502013-03-27 09:16:30 -05001739 }
Josh Durgin17c1cc12013-08-26 17:55:38 -07001740 obj_request->xferred = length;
Alex Elder6e2a4502013-03-27 09:16:30 -05001741 obj_request_done_set(obj_request);
1742}
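
/*
 * Worked example of the fixups above (hypothetical numbers), for a
 * read with length == 8192:
 *
 *	result == -ENOENT             zero bytes 0..8191, result = 0
 *	result == 0, xferred == 4096  zero bytes 4096..8191
 *	result < 0 (other errors)     data untouched, error reported
 *
 * In every case xferred is forced to 8192 so the block layer sees
 * the whole request as finished.
 */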
1743
Alex Elderbf0d5f502012-11-22 00:00:08 -06001744static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1745{
Alex Elder37206ee2013-02-20 17:32:08 -06001746 dout("%s: obj %p cb %p\n", __func__, obj_request,
1747 obj_request->callback);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001748 if (obj_request->callback)
1749 obj_request->callback(obj_request);
Alex Elder788e2df2013-01-17 12:25:27 -06001750 else
1751 complete_all(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001752}
1753
Alex Elderc47f9372013-02-26 14:23:07 -06001754static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
Alex Elder39bf2c52013-02-26 14:23:07 -06001755{
1756 dout("%s: obj %p\n", __func__, obj_request);
1757 obj_request_done_set(obj_request);
1758}
1759
Alex Elderc47f9372013-02-26 14:23:07 -06001760static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001761{
Alex Elder57acbaa2013-02-11 12:33:24 -06001762 struct rbd_img_request *img_request = NULL;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001763 struct rbd_device *rbd_dev = NULL;
Alex Elder57acbaa2013-02-11 12:33:24 -06001764 bool layered = false;
1765
1766 if (obj_request_img_data_test(obj_request)) {
1767 img_request = obj_request->img_request;
1768 layered = img_request && img_request_layered_test(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001769 rbd_dev = img_request->rbd_dev;
Alex Elder57acbaa2013-02-11 12:33:24 -06001770 }
Alex Elder8b3e1a52013-01-24 16:13:36 -06001771
1772 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1773 obj_request, img_request, obj_request->result,
1774 obj_request->xferred, obj_request->length);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05001775 if (layered && obj_request->result == -ENOENT &&
1776 obj_request->img_offset < rbd_dev->parent_overlap)
Alex Elder8b3e1a52013-01-24 16:13:36 -06001777 rbd_img_parent_read(obj_request);
1778 else if (img_request)
Alex Elder6e2a4502013-03-27 09:16:30 -05001779 rbd_img_obj_request_read_callback(obj_request);
1780 else
1781 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001782}
1783
Alex Elderc47f9372013-02-26 14:23:07 -06001784static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001785{
Sage Weil1b83bef2013-02-25 16:11:12 -08001786 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1787 obj_request->result, obj_request->length);
1788 /*
Alex Elder8b3e1a52013-01-24 16:13:36 -06001789 * There is no such thing as a successful short write. Set
1790 * it to our originally-requested length.
Sage Weil1b83bef2013-02-25 16:11:12 -08001791 */
1792 obj_request->xferred = obj_request->length;
Alex Elder07741302013-02-05 23:41:50 -06001793 obj_request_done_set(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001794}
1795
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001796static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
1797{
1798 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1799 obj_request->result, obj_request->length);
1800 /*
1801 * There is no such thing as a successful short discard. Set
1802 * it to our originally-requested length.
1803 */
1804 obj_request->xferred = obj_request->length;
Josh Durgind0265de2014-04-07 16:54:10 -07001805 /* discarding a non-existent object is not a problem */
1806 if (obj_request->result == -ENOENT)
1807 obj_request->result = 0;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001808 obj_request_done_set(obj_request);
1809}
1810
Alex Elderfbfab532013-02-08 09:55:48 -06001811/*
1812 * For a simple stat call there's nothing to do. We'll do more if
1813 * this is part of a write sequence for a layered image.
1814 */
Alex Elderc47f9372013-02-26 14:23:07 -06001815static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
Alex Elderfbfab532013-02-08 09:55:48 -06001816{
Alex Elder37206ee2013-02-20 17:32:08 -06001817 dout("%s: obj %p\n", __func__, obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001818 obj_request_done_set(obj_request);
1819}
1820
Ilya Dryomov27617132015-07-16 17:36:11 +03001821static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
1822{
1823 dout("%s: obj %p\n", __func__, obj_request);
1824
1825 if (obj_request_img_data_test(obj_request))
1826 rbd_osd_copyup_callback(obj_request);
1827 else
1828 obj_request_done_set(obj_request);
1829}
1830
Alex Elderbf0d5f502012-11-22 00:00:08 -06001831static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1832 struct ceph_msg *msg)
1833{
1834 struct rbd_obj_request *obj_request = osd_req->r_priv;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001835 u16 opcode;
1836
Alex Elder37206ee2013-02-20 17:32:08 -06001837 dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001838 rbd_assert(osd_req == obj_request->osd_req);
Alex Elder57acbaa2013-02-11 12:33:24 -06001839 if (obj_request_img_data_test(obj_request)) {
1840 rbd_assert(obj_request->img_request);
1841 rbd_assert(obj_request->which != BAD_WHICH);
1842 } else {
1843 rbd_assert(obj_request->which == BAD_WHICH);
1844 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06001845
Sage Weil1b83bef2013-02-25 16:11:12 -08001846 if (osd_req->r_result < 0)
1847 obj_request->result = osd_req->r_result;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001848
Alex Elderc47f9372013-02-26 14:23:07 -06001849 /*
1850 * We support a 64-bit length, but ultimately it has to be
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01001851 * passed to the block layer, which just supports a 32-bit
1852 * length field.
Alex Elderc47f9372013-02-26 14:23:07 -06001853 */
Yan, Zheng7665d852016-01-07 16:48:57 +08001854 obj_request->xferred = osd_req->r_ops[0].outdata_len;
Alex Elder8b3e1a52013-01-24 16:13:36 -06001855 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001856
Alex Elder79528732013-04-03 21:32:51 -05001857 opcode = osd_req->r_ops[0].op;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001858 switch (opcode) {
1859 case CEPH_OSD_OP_READ:
Alex Elderc47f9372013-02-26 14:23:07 -06001860 rbd_osd_read_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001861 break;
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001862 case CEPH_OSD_OP_SETALLOCHINT:
Ilya Dryomove30b7572015-10-07 17:27:17 +02001863 rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
1864 osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001865 /* fall through */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001866 case CEPH_OSD_OP_WRITE:
Ilya Dryomove30b7572015-10-07 17:27:17 +02001867 case CEPH_OSD_OP_WRITEFULL:
Alex Elderc47f9372013-02-26 14:23:07 -06001868 rbd_osd_write_callback(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001869 break;
Alex Elderfbfab532013-02-08 09:55:48 -06001870 case CEPH_OSD_OP_STAT:
Alex Elderc47f9372013-02-26 14:23:07 -06001871 rbd_osd_stat_callback(obj_request);
Alex Elderfbfab532013-02-08 09:55:48 -06001872 break;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001873 case CEPH_OSD_OP_DELETE:
1874 case CEPH_OSD_OP_TRUNCATE:
1875 case CEPH_OSD_OP_ZERO:
1876 rbd_osd_discard_callback(obj_request);
1877 break;
Alex Elder36be9a72013-01-19 00:30:28 -06001878 case CEPH_OSD_OP_CALL:
Ilya Dryomov27617132015-07-16 17:36:11 +03001879 rbd_osd_call_callback(obj_request);
1880 break;
Alex Elderb8d70032012-11-30 17:53:04 -06001881 case CEPH_OSD_OP_NOTIFY_ACK:
Alex Elder9969ebc2013-01-18 12:31:10 -06001882 case CEPH_OSD_OP_WATCH:
Alex Elderc47f9372013-02-26 14:23:07 -06001883 rbd_osd_trivial_callback(obj_request);
Alex Elder9969ebc2013-01-18 12:31:10 -06001884 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001885 default:
Ilya Dryomov9584d502014-07-11 12:11:20 +04001886 rbd_warn(NULL, "%s: unsupported op %hu",
Alex Elderbf0d5f502012-11-22 00:00:08 -06001887 obj_request->object_name, (unsigned short) opcode);
1888 break;
1889 }
1890
Alex Elder07741302013-02-05 23:41:50 -06001891 if (obj_request_done_test(obj_request))
Alex Elderbf0d5f502012-11-22 00:00:08 -06001892 rbd_obj_request_complete(obj_request);
1893}
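
/*
 * Note on the dispatch above: the callback keys off r_ops[0], so
 * for a hint-prefixed data write op 0 is CEPH_OSD_OP_SETALLOCHINT
 * and the assertion checks that op 1 is the WRITE/WRITEFULL before
 * falling through to the write callback.
 */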
1894
Alex Elder9d4df012013-04-19 15:34:50 -05001895static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
Alex Elder430c28c2013-04-03 21:32:51 -05001896{
1897 struct rbd_img_request *img_request = obj_request->img_request;
Alex Elder8c042b02013-04-03 01:28:58 -05001898 struct ceph_osd_request *osd_req = obj_request->osd_req;
Alex Elder9d4df012013-04-19 15:34:50 -05001899 u64 snap_id;
Alex Elder430c28c2013-04-03 21:32:51 -05001900
Alex Elder8c042b02013-04-03 01:28:58 -05001901 rbd_assert(osd_req != NULL);
Alex Elder430c28c2013-04-03 21:32:51 -05001902
Alex Elder9d4df012013-04-19 15:34:50 -05001903 snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
Alex Elder8c042b02013-04-03 01:28:58 -05001904 ceph_osdc_build_request(osd_req, obj_request->offset,
Alex Elder9d4df012013-04-19 15:34:50 -05001905 NULL, snap_id, NULL);
1906}
1907
1908static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1909{
1910 struct rbd_img_request *img_request = obj_request->img_request;
1911 struct ceph_osd_request *osd_req = obj_request->osd_req;
1912 struct ceph_snap_context *snapc;
1913 struct timespec mtime = CURRENT_TIME;
1914
1915 rbd_assert(osd_req != NULL);
1916
1917 snapc = img_request ? img_request->snapc : NULL;
1918 ceph_osdc_build_request(osd_req, obj_request->offset,
1919 snapc, CEPH_NOSNAP, &mtime);
Alex Elder430c28c2013-04-03 21:32:51 -05001920}
1921
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02001922/*
1923 * Create an osd request. A read request has one osd op (read).
1924 * A write request has either one (watch) or two (hint+write) osd ops.
1925 * (All rbd data writes are prefixed with an allocation hint op, but
1926 * technically osd watch is a write request, hence this distinction.)
1927 */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001928static struct ceph_osd_request *rbd_osd_req_create(
1929 struct rbd_device *rbd_dev,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001930 enum obj_operation_type op_type,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001931 unsigned int num_ops,
Alex Elder430c28c2013-04-03 21:32:51 -05001932 struct rbd_obj_request *obj_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001933{
Alex Elderbf0d5f502012-11-22 00:00:08 -06001934 struct ceph_snap_context *snapc = NULL;
1935 struct ceph_osd_client *osdc;
1936 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001937
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001938 if (obj_request_img_data_test(obj_request) &&
1939 (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
Alex Elder6365d332013-02-11 12:33:24 -06001940 struct rbd_img_request *img_request = obj_request->img_request;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001941 if (op_type == OBJ_OP_WRITE) {
1942 rbd_assert(img_request_write_test(img_request));
1943 } else {
1944 rbd_assert(img_request_discard_test(img_request));
1945 }
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001946 snapc = img_request->snapc;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001947 }
1948
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08001949 rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001950
1951 /* Allocate and initialize the request, for the num_ops ops */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001952
1953 osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02001954 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
David Disseldorp2224d872016-04-05 11:13:39 +02001955 GFP_NOIO);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001956 if (!osd_req)
1957 return NULL; /* ENOMEM */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001958
Guangliang Zhao90e98c52014-04-01 22:22:16 +08001959 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001960 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
Alex Elder430c28c2013-04-03 21:32:51 -05001961 else
Alex Elderbf0d5f502012-11-22 00:00:08 -06001962 osd_req->r_flags = CEPH_OSD_FLAG_READ;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001963
1964 osd_req->r_callback = rbd_osd_req_callback;
1965 osd_req->r_priv = obj_request;
1966
Ilya Dryomov3c972c92014-01-27 17:40:20 +02001967 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
1968 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001969
Alex Elderbf0d5f502012-11-22 00:00:08 -06001970 return osd_req;
1971}
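
/*
 * The ops themselves are filled in later (see
 * rbd_img_obj_request_fill()); typical resulting layouts are
 * [READ] for a read, [SETALLOCHINT, WRITE or WRITEFULL] for a data
 * write, a single [DELETE, TRUNCATE or ZERO] op for a discard, and
 * a single op for the watch request mentioned above.
 */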
1972
Alex Elder0eefd472013-04-19 15:34:50 -05001973/*
Josh Durgind3246fb2014-04-07 16:49:21 -07001974 * Create a copyup osd request based on the information in the object
1975 * request supplied. A copyup request has two or three osd ops, a
1976 * copyup method call, potentially a hint op, and a write or truncate
1977 * or zero op.
Alex Elder0eefd472013-04-19 15:34:50 -05001978 */
1979static struct ceph_osd_request *
1980rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1981{
1982 struct rbd_img_request *img_request;
1983 struct ceph_snap_context *snapc;
1984 struct rbd_device *rbd_dev;
1985 struct ceph_osd_client *osdc;
1986 struct ceph_osd_request *osd_req;
Josh Durgind3246fb2014-04-07 16:49:21 -07001987 int num_osd_ops = 3;
Alex Elder0eefd472013-04-19 15:34:50 -05001988
1989 rbd_assert(obj_request_img_data_test(obj_request));
1990 img_request = obj_request->img_request;
1991 rbd_assert(img_request);
Josh Durgind3246fb2014-04-07 16:49:21 -07001992 rbd_assert(img_request_write_test(img_request) ||
1993 img_request_discard_test(img_request));
Alex Elder0eefd472013-04-19 15:34:50 -05001994
Josh Durgind3246fb2014-04-07 16:49:21 -07001995 if (img_request_discard_test(img_request))
1996 num_osd_ops = 2;
1997
1998 /* Allocate and initialize the request, for all the ops */
Alex Elder0eefd472013-04-19 15:34:50 -05001999
2000 snapc = img_request->snapc;
2001 rbd_dev = img_request->rbd_dev;
2002 osdc = &rbd_dev->rbd_client->client->osdc;
Josh Durgind3246fb2014-04-07 16:49:21 -07002003 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
David Disseldorp2224d872016-04-05 11:13:39 +02002004 false, GFP_NOIO);
Alex Elder0eefd472013-04-19 15:34:50 -05002005 if (!osd_req)
2006 return NULL; /* ENOMEM */
2007
2008 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
2009 osd_req->r_callback = rbd_osd_req_callback;
2010 osd_req->r_priv = obj_request;
2011
Ilya Dryomov3c972c92014-01-27 17:40:20 +02002012 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
2013 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
Alex Elder0eefd472013-04-19 15:34:50 -05002014
Alex Elder0eefd472013-04-19 15:34:50 -05002015 return osd_req;
2016}
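
/*
 * The copyup send path appends the ops to the request allocated
 * above, so a copyup-for-write ends up as
 * [CALL "copyup", SETALLOCHINT, WRITE] (3 ops) and a
 * copyup-for-discard as [CALL "copyup", TRUNCATE or ZERO] (2 ops).
 */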
2017
2018
Alex Elderbf0d5f502012-11-22 00:00:08 -06002019static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
2020{
2021 ceph_osdc_put_request(osd_req);
2022}
2023
2024/* object_name is assumed to be a non-null pointer and NUL-terminated */
2025
2026static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
2027 u64 offset, u64 length,
2028 enum obj_request_type type)
2029{
2030 struct rbd_obj_request *obj_request;
2031 size_t size;
2032 char *name;
2033
2034 rbd_assert(obj_request_type_valid(type));
2035
2036 size = strlen(object_name) + 1;
Ilya Dryomov5a60e872015-06-24 17:24:33 +03002037 name = kmalloc(size, GFP_NOIO);
Alex Elderf907ad52013-05-01 12:43:03 -05002038 if (!name)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002039 return NULL;
2040
Ilya Dryomov5a60e872015-06-24 17:24:33 +03002041 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
Alex Elderf907ad52013-05-01 12:43:03 -05002042 if (!obj_request) {
2043 kfree(name);
2044 return NULL;
2045 }
2046
Alex Elderbf0d5f502012-11-22 00:00:08 -06002047 obj_request->object_name = memcpy(name, object_name, size);
2048 obj_request->offset = offset;
2049 obj_request->length = length;
Alex Elder926f9b32013-02-11 12:33:24 -06002050 obj_request->flags = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002051 obj_request->which = BAD_WHICH;
2052 obj_request->type = type;
2053 INIT_LIST_HEAD(&obj_request->links);
Alex Elder788e2df2013-01-17 12:25:27 -06002054 init_completion(&obj_request->completion);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002055 kref_init(&obj_request->kref);
2056
Alex Elder37206ee2013-02-20 17:32:08 -06002057 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
2058 offset, length, (int)type, obj_request);
2059
Alex Elderbf0d5f502012-11-22 00:00:08 -06002060 return obj_request;
2061}
2062
2063static void rbd_obj_request_destroy(struct kref *kref)
2064{
2065 struct rbd_obj_request *obj_request;
2066
2067 obj_request = container_of(kref, struct rbd_obj_request, kref);
2068
Alex Elder37206ee2013-02-20 17:32:08 -06002069 dout("%s: obj %p\n", __func__, obj_request);
2070
Alex Elderbf0d5f502012-11-22 00:00:08 -06002071 rbd_assert(obj_request->img_request == NULL);
2072 rbd_assert(obj_request->which == BAD_WHICH);
2073
2074 if (obj_request->osd_req)
2075 rbd_osd_req_destroy(obj_request->osd_req);
2076
2077 rbd_assert(obj_request_type_valid(obj_request->type));
2078 switch (obj_request->type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06002079 case OBJ_REQUEST_NODATA:
2080 break; /* Nothing to do */
Alex Elderbf0d5f502012-11-22 00:00:08 -06002081 case OBJ_REQUEST_BIO:
2082 if (obj_request->bio_list)
2083 bio_chain_put(obj_request->bio_list);
2084 break;
Alex Elder788e2df2013-01-17 12:25:27 -06002085 case OBJ_REQUEST_PAGES:
2086 if (obj_request->pages)
2087 ceph_release_page_vector(obj_request->pages,
2088 obj_request->page_count);
2089 break;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002090 }
2091
Alex Elderf907ad52013-05-01 12:43:03 -05002092 kfree(obj_request->object_name);
Alex Elder868311b2013-05-01 12:43:03 -05002093 obj_request->object_name = NULL;
2094 kmem_cache_free(rbd_obj_request_cache, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002095}
2096
Alex Elderfb65d2282013-05-08 22:50:04 -05002097/* It's OK to call this for a device with no parent */
2098
2099static void rbd_spec_put(struct rbd_spec *spec);
2100static void rbd_dev_unparent(struct rbd_device *rbd_dev)
2101{
2102 rbd_dev_remove_parent(rbd_dev);
2103 rbd_spec_put(rbd_dev->parent_spec);
2104 rbd_dev->parent_spec = NULL;
2105 rbd_dev->parent_overlap = 0;
2106}
2107
Alex Elderbf0d5f502012-11-22 00:00:08 -06002108/*
Alex Eldera2acd002013-05-08 22:50:04 -05002109 * Parent image reference counting is used to determine when an
2110 * image's parent fields can be safely torn down--after there are no
2111 * more in-flight requests to the parent image. When the last
2112 * reference is dropped, cleaning them up is safe.
2113 */
2114static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
2115{
2116 int counter;
2117
2118 if (!rbd_dev->parent_spec)
2119 return;
2120
2121 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
2122 if (counter > 0)
2123 return;
2124
2125 /* Last reference; clean up parent data structures */
2126
2127 if (!counter)
2128 rbd_dev_unparent(rbd_dev);
2129 else
Ilya Dryomov9584d502014-07-11 12:11:20 +04002130 rbd_warn(rbd_dev, "parent reference underflow");
Alex Eldera2acd002013-05-08 22:50:04 -05002131}
2132
2133/*
2134 * If an image has a non-zero parent overlap, get a reference to its
2135 * parent.
2136 *
2137 * Returns true if the rbd device has a parent with a non-zero
2138 * overlap and a reference for it was successfully taken, or
2139 * false otherwise.
2140 */
2141static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
2142{
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002143 int counter = 0;
Alex Eldera2acd002013-05-08 22:50:04 -05002144
2145 if (!rbd_dev->parent_spec)
2146 return false;
2147
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002148 down_read(&rbd_dev->header_rwsem);
2149 if (rbd_dev->parent_overlap)
2150 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
2151 up_read(&rbd_dev->header_rwsem);
Alex Eldera2acd002013-05-08 22:50:04 -05002152
2153 if (counter < 0)
Ilya Dryomov9584d502014-07-11 12:11:20 +04002154 rbd_warn(rbd_dev, "parent reference overflow");
Alex Eldera2acd002013-05-08 22:50:04 -05002155
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03002156 return counter > 0;
Alex Eldera2acd002013-05-08 22:50:04 -05002157}
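
/*
 * The parent_ref protocol in the two helpers above, in short: each
 * layered image request takes a reference with rbd_dev_parent_get()
 * and drops it with rbd_dev_parent_put().  Because
 * atomic_inc_return_safe() refuses to raise a counter that has
 * reached 0, once the last reference is dropped and the parent is
 * torn down no later request can resurrect it: rbd_dev_parent_get()
 * simply returns false and the request proceeds unlayered.
 */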
2158
Alex Elderbf0d5f502012-11-22 00:00:08 -06002159/*
2160 * Caller is responsible for filling in the list of object requests
2161 * that comprises the image request, and the Linux request pointer
2162 * (if there is one).
2163 */
Alex Eldercc344fa2013-02-19 12:25:56 -06002164static struct rbd_img_request *rbd_img_request_create(
2165 struct rbd_device *rbd_dev,
Alex Elderbf0d5f502012-11-22 00:00:08 -06002166 u64 offset, u64 length,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002167 enum obj_operation_type op_type,
Josh Durgin4e752f02014-04-08 11:12:11 -07002168 struct ceph_snap_context *snapc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002169{
2170 struct rbd_img_request *img_request;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002171
Ilya Dryomov7a716aa2014-08-05 11:25:54 +04002172 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002173 if (!img_request)
2174 return NULL;
2175
Alex Elderbf0d5f502012-11-22 00:00:08 -06002176 img_request->rq = NULL;
2177 img_request->rbd_dev = rbd_dev;
2178 img_request->offset = offset;
2179 img_request->length = length;
Alex Elder0c425242013-02-08 09:55:49 -06002180 img_request->flags = 0;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002181 if (op_type == OBJ_OP_DISCARD) {
2182 img_request_discard_set(img_request);
2183 img_request->snapc = snapc;
2184 } else if (op_type == OBJ_OP_WRITE) {
Alex Elder0c425242013-02-08 09:55:49 -06002185 img_request_write_set(img_request);
Josh Durgin4e752f02014-04-08 11:12:11 -07002186 img_request->snapc = snapc;
Alex Elder0c425242013-02-08 09:55:49 -06002187 } else {
Alex Elderbf0d5f502012-11-22 00:00:08 -06002188 img_request->snap_id = rbd_dev->spec->snap_id;
Alex Elder0c425242013-02-08 09:55:49 -06002189 }
Alex Eldera2acd002013-05-08 22:50:04 -05002190 if (rbd_dev_parent_get(rbd_dev))
Alex Elderd0b2e942013-01-24 16:13:36 -06002191 img_request_layered_set(img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002192 spin_lock_init(&img_request->completion_lock);
2193 img_request->next_completion = 0;
2194 img_request->callback = NULL;
Alex Eldera5a337d2013-01-24 16:13:36 -06002195 img_request->result = 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002196 img_request->obj_request_count = 0;
2197 INIT_LIST_HEAD(&img_request->obj_requests);
2198 kref_init(&img_request->kref);
2199
Alex Elder37206ee2013-02-20 17:32:08 -06002200 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002201 obj_op_name(op_type), offset, length, img_request);
Alex Elder37206ee2013-02-20 17:32:08 -06002202
Alex Elderbf0d5f502012-11-22 00:00:08 -06002203 return img_request;
2204}
2205
2206static void rbd_img_request_destroy(struct kref *kref)
2207{
2208 struct rbd_img_request *img_request;
2209 struct rbd_obj_request *obj_request;
2210 struct rbd_obj_request *next_obj_request;
2211
2212 img_request = container_of(kref, struct rbd_img_request, kref);
2213
Alex Elder37206ee2013-02-20 17:32:08 -06002214 dout("%s: img %p\n", __func__, img_request);
2215
Alex Elderbf0d5f502012-11-22 00:00:08 -06002216 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2217 rbd_img_obj_request_del(img_request, obj_request);
Alex Elder25dcf952013-01-25 17:08:55 -06002218 rbd_assert(img_request->obj_request_count == 0);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002219
Alex Eldera2acd002013-05-08 22:50:04 -05002220 if (img_request_layered_test(img_request)) {
2221 img_request_layered_clear(img_request);
2222 rbd_dev_parent_put(img_request->rbd_dev);
2223 }
2224
Josh Durginbef95452014-04-04 17:47:52 -07002225 if (img_request_write_test(img_request) ||
2226 img_request_discard_test(img_request))
Alex Elder812164f82013-04-30 00:44:32 -05002227 ceph_put_snap_context(img_request->snapc);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002228
Alex Elder1c2a9df2013-05-01 12:43:03 -05002229 kmem_cache_free(rbd_img_request_cache, img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002230}
2231
Alex Eldere93f3152013-05-08 22:50:04 -05002232static struct rbd_img_request *rbd_parent_request_create(
2233 struct rbd_obj_request *obj_request,
2234 u64 img_offset, u64 length)
2235{
2236 struct rbd_img_request *parent_request;
2237 struct rbd_device *rbd_dev;
2238
2239 rbd_assert(obj_request->img_request);
2240 rbd_dev = obj_request->img_request->rbd_dev;
2241
Josh Durgin4e752f02014-04-08 11:12:11 -07002242 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002243 length, OBJ_OP_READ, NULL);
Alex Eldere93f3152013-05-08 22:50:04 -05002244 if (!parent_request)
2245 return NULL;
2246
2247 img_request_child_set(parent_request);
2248 rbd_obj_request_get(obj_request);
2249 parent_request->obj_request = obj_request;
2250
2251 return parent_request;
2252}
2253
2254static void rbd_parent_request_destroy(struct kref *kref)
2255{
2256 struct rbd_img_request *parent_request;
2257 struct rbd_obj_request *orig_request;
2258
2259 parent_request = container_of(kref, struct rbd_img_request, kref);
2260 orig_request = parent_request->obj_request;
2261
2262 parent_request->obj_request = NULL;
2263 rbd_obj_request_put(orig_request);
2264 img_request_child_clear(parent_request);
2265
2266 rbd_img_request_destroy(kref);
2267}
2268
Alex Elder12178572013-02-08 09:55:49 -06002269static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2270{
Alex Elder6365d332013-02-11 12:33:24 -06002271 struct rbd_img_request *img_request;
Alex Elder12178572013-02-08 09:55:49 -06002272 unsigned int xferred;
2273 int result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002274 bool more;
Alex Elder12178572013-02-08 09:55:49 -06002275
Alex Elder6365d332013-02-11 12:33:24 -06002276 rbd_assert(obj_request_img_data_test(obj_request));
2277 img_request = obj_request->img_request;
2278
Alex Elder12178572013-02-08 09:55:49 -06002279 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2280 xferred = (unsigned int)obj_request->xferred;
2281 result = obj_request->result;
2282 if (result) {
2283 struct rbd_device *rbd_dev = img_request->rbd_dev;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002284 enum obj_operation_type op_type;
2285
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002286 if (img_request_discard_test(img_request))
2287 op_type = OBJ_OP_DISCARD;
2288 else if (img_request_write_test(img_request))
2289 op_type = OBJ_OP_WRITE;
2290 else
2291 op_type = OBJ_OP_READ;
Alex Elder12178572013-02-08 09:55:49 -06002292
Ilya Dryomov9584d502014-07-11 12:11:20 +04002293 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002294 obj_op_name(op_type), obj_request->length,
2295 obj_request->img_offset, obj_request->offset);
Ilya Dryomov9584d502014-07-11 12:11:20 +04002296 rbd_warn(rbd_dev, " result %d xferred %x",
Alex Elder12178572013-02-08 09:55:49 -06002297 result, xferred);
2298 if (!img_request->result)
2299 img_request->result = result;
Ilya Dryomov082a75d2015-04-25 15:56:15 +03002300 /*
2301 * Need to end I/O on the entire obj_request worth of
2302 * bytes in case of error.
2303 */
2304 xferred = obj_request->length;
Alex Elder12178572013-02-08 09:55:49 -06002305 }
2306
Alex Elderf1a47392013-04-19 15:34:50 -05002307 /* Image object requests don't own their page array */
2308
2309 if (obj_request->type == OBJ_REQUEST_PAGES) {
2310 obj_request->pages = NULL;
2311 obj_request->page_count = 0;
2312 }
2313
Alex Elder8b3e1a52013-01-24 16:13:36 -06002314 if (img_request_child_test(img_request)) {
2315 rbd_assert(img_request->obj_request != NULL);
2316 more = obj_request->which < img_request->obj_request_count - 1;
2317 } else {
2318 rbd_assert(img_request->rq != NULL);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01002319
2320 more = blk_update_request(img_request->rq, result, xferred);
2321 if (!more)
2322 __blk_mq_end_request(img_request->rq, result);
Alex Elder8b3e1a52013-01-24 16:13:36 -06002323 }
2324
2325 return more;
Alex Elder12178572013-02-08 09:55:49 -06002326}
2327
Alex Elder21692382013-04-05 01:27:12 -05002328static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2329{
2330 struct rbd_img_request *img_request;
2331 u32 which = obj_request->which;
2332 bool more = true;
2333
Alex Elder6365d332013-02-11 12:33:24 -06002334 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elder21692382013-04-05 01:27:12 -05002335 img_request = obj_request->img_request;
2336
2337 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2338 rbd_assert(img_request != NULL);
Alex Elder21692382013-04-05 01:27:12 -05002339 rbd_assert(img_request->obj_request_count > 0);
2340 rbd_assert(which != BAD_WHICH);
2341 rbd_assert(which < img_request->obj_request_count);
Alex Elder21692382013-04-05 01:27:12 -05002342
2343 spin_lock_irq(&img_request->completion_lock);
2344 if (which != img_request->next_completion)
2345 goto out;
2346
2347 for_each_obj_request_from(img_request, obj_request) {
Alex Elder21692382013-04-05 01:27:12 -05002348 rbd_assert(more);
2349 rbd_assert(which < img_request->obj_request_count);
2350
2351 if (!obj_request_done_test(obj_request))
2352 break;
Alex Elder12178572013-02-08 09:55:49 -06002353 more = rbd_img_obj_end_request(obj_request);
Alex Elder21692382013-04-05 01:27:12 -05002354 which++;
2355 }
2356
2357 rbd_assert(more ^ (which == img_request->obj_request_count));
2358 img_request->next_completion = which;
2359out:
2360 spin_unlock_irq(&img_request->completion_lock);
Alex Elder0f2d5be2014-04-26 14:21:44 +04002361 rbd_img_request_put(img_request);
Alex Elder21692382013-04-05 01:27:12 -05002362
2363 if (!more)
2364 rbd_img_request_complete(img_request);
2365}
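
/*
 * Completion-window example for the callback above (hypothetical
 * order): for object requests 0..2, if request 2 completes first,
 * next_completion is still 0 and nothing is ended; when request 0
 * completes, it (and 1 and 2, if already done) is ended in order
 * and next_completion advances past the contiguous done run.
 * Completions thus reach the block layer strictly in object order.
 */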
2366
Alex Elderf1a47392013-04-19 15:34:50 -05002367/*
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002368 * Add individual osd ops to the given ceph_osd_request and prepare
2369 * them for submission. num_ops is the current number of
2370 * osd operations already added to the osd request.
2371 */
2372static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2373 struct ceph_osd_request *osd_request,
2374 enum obj_operation_type op_type,
2375 unsigned int num_ops)
2376{
2377 struct rbd_img_request *img_request = obj_request->img_request;
2378 struct rbd_device *rbd_dev = img_request->rbd_dev;
2379 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2380 u64 offset = obj_request->offset;
2381 u64 length = obj_request->length;
2382 u64 img_end;
2383 u16 opcode;
2384
2385 if (op_type == OBJ_OP_DISCARD) {
Josh Durgind3246fb2014-04-07 16:49:21 -07002386 if (!offset && length == object_size &&
2387 (!img_request_layered_test(img_request) ||
2388 !obj_request_overlaps_parent(obj_request))) {
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002389 opcode = CEPH_OSD_OP_DELETE;
2390 } else if ((offset + length == object_size)) {
2391 opcode = CEPH_OSD_OP_TRUNCATE;
2392 } else {
2393 down_read(&rbd_dev->header_rwsem);
2394 img_end = rbd_dev->header.image_size;
2395 up_read(&rbd_dev->header_rwsem);
2396
2397 if (obj_request->img_offset + length == img_end)
2398 opcode = CEPH_OSD_OP_TRUNCATE;
2399 else
2400 opcode = CEPH_OSD_OP_ZERO;
2401 }
2402 } else if (op_type == OBJ_OP_WRITE) {
Ilya Dryomove30b7572015-10-07 17:27:17 +02002403 if (!offset && length == object_size)
2404 opcode = CEPH_OSD_OP_WRITEFULL;
2405 else
2406 opcode = CEPH_OSD_OP_WRITE;
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002407 osd_req_op_alloc_hint_init(osd_request, num_ops,
2408 object_size, object_size);
2409 num_ops++;
2410 } else {
2411 opcode = CEPH_OSD_OP_READ;
2412 }
2413
Ilya Dryomov7e868b62014-11-21 22:16:43 +03002414 if (opcode == CEPH_OSD_OP_DELETE)
Yan, Zheng144cba12015-04-27 11:09:54 +08002415 osd_req_op_init(osd_request, num_ops, opcode, 0);
Ilya Dryomov7e868b62014-11-21 22:16:43 +03002416 else
2417 osd_req_op_extent_init(osd_request, num_ops, opcode,
2418 offset, length, 0, 0);
2419
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002420 if (obj_request->type == OBJ_REQUEST_BIO)
2421 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2422 obj_request->bio_list, length);
2423 else if (obj_request->type == OBJ_REQUEST_PAGES)
2424 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2425 obj_request->pages, length,
2426 offset & ~PAGE_MASK, false, false);
2427
2428 /* Discards are also writes */
2429 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2430 rbd_osd_req_format_write(obj_request);
2431 else
2432 rbd_osd_req_format_read(obj_request);
2433}
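
/*
 * Discard opcode selection above, summarized: a discard covering a
 * whole object that needs no parent data becomes DELETE; one that
 * runs to the end of the object (or of the image) becomes
 * TRUNCATE; anything else becomes ZERO.
 */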
2434
2435/*
Alex Elderf1a47392013-04-19 15:34:50 -05002436 * Split up an image request into one or more object requests, each
2437 * to a different object. The "type" parameter indicates whether
2438 * "data_desc" is the pointer to the head of a list of bio
2439 * structures, or the base of a page array. In either case this
2440 * function assumes data_desc describes memory sufficient to hold
2441 * all data described by the image request.
2442 */
2443static int rbd_img_request_fill(struct rbd_img_request *img_request,
2444 enum obj_request_type type,
2445 void *data_desc)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002446{
2447 struct rbd_device *rbd_dev = img_request->rbd_dev;
2448 struct rbd_obj_request *obj_request = NULL;
2449 struct rbd_obj_request *next_obj_request;
Jingoo Hana1580732013-08-09 13:04:35 +09002450 struct bio *bio_list = NULL;
Alex Elderf1a47392013-04-19 15:34:50 -05002451 unsigned int bio_offset = 0;
Jingoo Hana1580732013-08-09 13:04:35 +09002452 struct page **pages = NULL;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002453 enum obj_operation_type op_type;
Alex Elder7da22d22013-01-24 16:13:36 -06002454 u64 img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002455 u64 resid;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002456
Alex Elderf1a47392013-04-19 15:34:50 -05002457 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2458 (int)type, data_desc);
Alex Elder37206ee2013-02-20 17:32:08 -06002459
Alex Elder7da22d22013-01-24 16:13:36 -06002460 img_offset = img_request->offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002461 resid = img_request->length;
Alex Elder4dda41d2013-02-20 21:59:33 -06002462 rbd_assert(resid > 0);
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002463 op_type = rbd_img_request_op_type(img_request);
Alex Elderf1a47392013-04-19 15:34:50 -05002464
2465 if (type == OBJ_REQUEST_BIO) {
2466 bio_list = data_desc;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002467 rbd_assert(img_offset ==
2468 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002469 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002470 pages = data_desc;
2471 }
2472
Alex Elderbf0d5f502012-11-22 00:00:08 -06002473 while (resid) {
Alex Elder2fa12322013-04-05 01:27:12 -05002474 struct ceph_osd_request *osd_req;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002475 const char *object_name;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002476 u64 offset;
2477 u64 length;
2478
Alex Elder7da22d22013-01-24 16:13:36 -06002479 object_name = rbd_segment_name(rbd_dev, img_offset);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002480 if (!object_name)
2481 goto out_unwind;
Alex Elder7da22d22013-01-24 16:13:36 -06002482 offset = rbd_segment_offset(rbd_dev, img_offset);
2483 length = rbd_segment_length(rbd_dev, img_offset, resid);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002484 obj_request = rbd_obj_request_create(object_name,
Alex Elderf1a47392013-04-19 15:34:50 -05002485 offset, length, type);
Alex Elder78c2a442013-05-01 12:43:04 -05002486 /* object request has its own copy of the object name */
2487 rbd_segment_name_free(object_name);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002488 if (!obj_request)
2489 goto out_unwind;
Ilya Dryomov62054da2014-03-04 11:57:17 +02002490
Josh Durgin03507db2013-08-27 14:45:46 -07002491 /*
2492 * set obj_request->img_request before creating the
2493 * osd_request so that it gets the right snapc
2494 */
2495 rbd_img_obj_request_add(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002496
Alex Elderf1a47392013-04-19 15:34:50 -05002497 if (type == OBJ_REQUEST_BIO) {
2498 unsigned int clone_size;
2499
2500 rbd_assert(length <= (u64)UINT_MAX);
2501 clone_size = (unsigned int)length;
2502 obj_request->bio_list =
2503 bio_chain_clone_range(&bio_list,
2504 &bio_offset,
2505 clone_size,
David Disseldorp2224d872016-04-05 11:13:39 +02002506 GFP_NOIO);
Alex Elderf1a47392013-04-19 15:34:50 -05002507 if (!obj_request->bio_list)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002508 goto out_unwind;
Guangliang Zhao90e98c52014-04-01 22:22:16 +08002509 } else if (type == OBJ_REQUEST_PAGES) {
Alex Elderf1a47392013-04-19 15:34:50 -05002510 unsigned int page_count;
2511
2512 obj_request->pages = pages;
2513 page_count = (u32)calc_pages_for(offset, length);
2514 obj_request->page_count = page_count;
2515 if ((offset + length) & ~PAGE_MASK)
2516 page_count--; /* more on last page */
2517 pages += page_count;
2518 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06002519
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002520 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2521 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2522 obj_request);
Alex Elder2fa12322013-04-05 01:27:12 -05002523 if (!osd_req)
Ilya Dryomov62054da2014-03-04 11:57:17 +02002524 goto out_unwind;
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002525
Alex Elder2fa12322013-04-05 01:27:12 -05002526 obj_request->osd_req = osd_req;
Alex Elder21692382013-04-05 01:27:12 -05002527 obj_request->callback = rbd_img_obj_callback;
Alex Elder7da22d22013-01-24 16:13:36 -06002528 obj_request->img_offset = img_offset;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002529
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002530 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2531
2532 rbd_img_request_get(img_request);
2533
Alex Elder7da22d22013-01-24 16:13:36 -06002534 img_offset += length;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002535 resid -= length;
2536 }
2537
2538 return 0;
2539
Alex Elderbf0d5f502012-11-22 00:00:08 -06002540out_unwind:
2541 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
Ilya Dryomov42dd0372014-03-04 11:57:17 +02002542 rbd_img_obj_request_del(img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002543
2544 return -ENOMEM;
2545}
2546
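/*
 * Completion callback for the copyup operation of a layered write.
 * Releases the page vector that held the data read from the parent,
 * then marks the original object request done.
 */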
Alex Elder3d7efd12013-04-19 15:34:50 -05002547static void
Ilya Dryomov27617132015-07-16 17:36:11 +03002548rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
Alex Elder0eefd472013-04-19 15:34:50 -05002549{
2550 struct rbd_img_request *img_request;
2551 struct rbd_device *rbd_dev;
Alex Elderebda6402013-05-10 16:29:22 -05002552 struct page **pages;
Alex Elder0eefd472013-04-19 15:34:50 -05002553 u32 page_count;
2554
Ilya Dryomov27617132015-07-16 17:36:11 +03002555 dout("%s: obj %p\n", __func__, obj_request);
2556
Josh Durgind3246fb2014-04-07 16:49:21 -07002557 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2558 obj_request->type == OBJ_REQUEST_NODATA);
Alex Elder0eefd472013-04-19 15:34:50 -05002559 rbd_assert(obj_request_img_data_test(obj_request));
2560 img_request = obj_request->img_request;
2561 rbd_assert(img_request);
2562
2563 rbd_dev = img_request->rbd_dev;
2564 rbd_assert(rbd_dev);
Alex Elder0eefd472013-04-19 15:34:50 -05002565
Alex Elderebda6402013-05-10 16:29:22 -05002566 pages = obj_request->copyup_pages;
2567 rbd_assert(pages != NULL);
Alex Elder0eefd472013-04-19 15:34:50 -05002568 obj_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002569 page_count = obj_request->copyup_page_count;
2570 rbd_assert(page_count);
2571 obj_request->copyup_page_count = 0;
2572 ceph_release_page_vector(pages, page_count);
Alex Elder0eefd472013-04-19 15:34:50 -05002573
2574 /*
2575 * We want the transfer count to reflect the size of the
2576 * original write request. There is no such thing as a
2577 * successful short write, so if the request was successful
2578 * we can just set it to the originally-requested length.
2579 */
2580 if (!obj_request->result)
2581 obj_request->xferred = obj_request->length;
2582
Ilya Dryomov27617132015-07-16 17:36:11 +03002583 obj_request_done_set(obj_request);
Alex Elder0eefd472013-04-19 15:34:50 -05002584}
2585
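/*
 * Callback invoked when the full-object read of parent data issued
 * by rbd_img_obj_parent_read_full() completes.  Replaces the osd
 * request of the original object request with a copyup request
 * carrying the parent data plus the original write op(s), and
 * submits it.  If the parent overlap has become 0 in the meantime,
 * the original request is simply resubmitted.
 */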
2586static void
Alex Elder3d7efd12013-04-19 15:34:50 -05002587rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2588{
2589 struct rbd_obj_request *orig_request;
Alex Elder0eefd472013-04-19 15:34:50 -05002590 struct ceph_osd_request *osd_req;
2591 struct ceph_osd_client *osdc;
2592 struct rbd_device *rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002593 struct page **pages;
Josh Durgind3246fb2014-04-07 16:49:21 -07002594 enum obj_operation_type op_type;
Alex Elderebda6402013-05-10 16:29:22 -05002595 u32 page_count;
Alex Elderbbea1c12013-05-06 17:40:33 -05002596 int img_result;
Alex Elderebda6402013-05-10 16:29:22 -05002597 u64 parent_length;
Alex Elder3d7efd12013-04-19 15:34:50 -05002598
2599 rbd_assert(img_request_child_test(img_request));
2600
2601 /* First get what we need from the image request */
2602
2603 pages = img_request->copyup_pages;
2604 rbd_assert(pages != NULL);
2605 img_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002606 page_count = img_request->copyup_page_count;
2607 rbd_assert(page_count);
2608 img_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002609
2610 orig_request = img_request->obj_request;
2611 rbd_assert(orig_request != NULL);
Alex Elderb91f09f2013-05-10 16:29:22 -05002612 rbd_assert(obj_request_type_valid(orig_request->type));
Alex Elderbbea1c12013-05-06 17:40:33 -05002613 img_result = img_request->result;
Alex Elderebda6402013-05-10 16:29:22 -05002614 parent_length = img_request->length;
2615 rbd_assert(parent_length == img_request->xferred);
Alex Elder3d7efd12013-04-19 15:34:50 -05002616 rbd_img_request_put(img_request);
2617
Alex Elder91c6feb2013-05-06 17:40:32 -05002618 rbd_assert(orig_request->img_request);
2619 rbd_dev = orig_request->img_request->rbd_dev;
Alex Elder3d7efd12013-04-19 15:34:50 -05002620 rbd_assert(rbd_dev);
Alex Elder3d7efd12013-04-19 15:34:50 -05002621
Alex Elderbbea1c12013-05-06 17:40:33 -05002622 /*
2623 * If the overlap has become 0 (most likely because the
2624 * image has been flattened) we need to free the pages
2625 * and re-submit the original write request.
2626 */
2627 if (!rbd_dev->parent_overlap) {
2628 struct ceph_osd_client *osdc;
2629
2630 ceph_release_page_vector(pages, page_count);
2631 osdc = &rbd_dev->rbd_client->client->osdc;
2632 img_result = rbd_obj_request_submit(osdc, orig_request);
2633 if (!img_result)
2634 return;
2635 }
2636
2637 if (img_result)
Alex Elder0eefd472013-04-19 15:34:50 -05002638 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002639
Alex Elder8785b1d2013-05-09 10:08:49 -05002640 /*
 2641	 * The original osd request is of no use to us any more.
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002642 * We need a new one that can hold the three ops in a copyup
Alex Elder8785b1d2013-05-09 10:08:49 -05002643 * request. Allocate the new copyup osd request for the
2644 * original request, and release the old one.
2645 */
Alex Elderbbea1c12013-05-06 17:40:33 -05002646 img_result = -ENOMEM;
Alex Elder0eefd472013-04-19 15:34:50 -05002647 osd_req = rbd_osd_req_create_copyup(orig_request);
2648 if (!osd_req)
2649 goto out_err;
Alex Elder8785b1d2013-05-09 10:08:49 -05002650 rbd_osd_req_destroy(orig_request->osd_req);
Alex Elder0eefd472013-04-19 15:34:50 -05002651 orig_request->osd_req = osd_req;
2652 orig_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002653 orig_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002654
Alex Elder0eefd472013-04-19 15:34:50 -05002655 /* Initialize the copyup op */
2656
2657 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
Alex Elderebda6402013-05-10 16:29:22 -05002658 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
Alex Elder0eefd472013-04-19 15:34:50 -05002659 false, false);
2660
Josh Durgind3246fb2014-04-07 16:49:21 -07002661 /* Add the other op(s) */
Ilya Dryomov0ccd5922014-02-25 16:22:28 +02002662
Josh Durgind3246fb2014-04-07 16:49:21 -07002663 op_type = rbd_img_request_op_type(orig_request->img_request);
2664 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
Alex Elder0eefd472013-04-19 15:34:50 -05002665
2666 /* All set, send it off. */
2667
Alex Elder0eefd472013-04-19 15:34:50 -05002668 osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elderbbea1c12013-05-06 17:40:33 -05002669 img_result = rbd_obj_request_submit(osdc, orig_request);
2670 if (!img_result)
Alex Elder0eefd472013-04-19 15:34:50 -05002671 return;
2672out_err:
2673 /* Record the error code and complete the request */
2674
Alex Elderbbea1c12013-05-06 17:40:33 -05002675 orig_request->result = img_result;
Alex Elder0eefd472013-04-19 15:34:50 -05002676 orig_request->xferred = 0;
2677 obj_request_done_set(orig_request);
2678 rbd_obj_request_complete(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002679}
2680
2681/*
2682 * Read from the parent image the range of data that covers the
2683 * entire target of the given object request. This is used for
2684 * satisfying a layered image write request when the target of an
2685 * object request from the image request does not exist.
2686 *
2687 * A page array big enough to hold the returned data is allocated
2688 * and supplied to rbd_img_request_fill() as the "data descriptor."
2689 * When the read completes, this page array will be transferred to
2690 * the original object request for the copyup operation.
2691 *
2692 * If an error occurs, record it as the result of the original
2693 * object request and mark it done so it gets completed.
2694 */
2695static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2696{
2697 struct rbd_img_request *img_request = NULL;
2698 struct rbd_img_request *parent_request = NULL;
2699 struct rbd_device *rbd_dev;
2700 u64 img_offset;
2701 u64 length;
2702 struct page **pages = NULL;
2703 u32 page_count;
2704 int result;
2705
2706 rbd_assert(obj_request_img_data_test(obj_request));
Alex Elderb91f09f2013-05-10 16:29:22 -05002707 rbd_assert(obj_request_type_valid(obj_request->type));
Alex Elder3d7efd12013-04-19 15:34:50 -05002708
2709 img_request = obj_request->img_request;
2710 rbd_assert(img_request != NULL);
2711 rbd_dev = img_request->rbd_dev;
2712 rbd_assert(rbd_dev->parent != NULL);
2713
2714 /*
2715 * Determine the byte range covered by the object in the
2716 * child image to which the original request was to be sent.
2717 */
2718 img_offset = obj_request->img_offset - obj_request->offset;
2719 length = (u64)1 << rbd_dev->header.obj_order;
2720
2721 /*
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002722 * There is no defined parent data beyond the parent
2723 * overlap, so limit what we read at that boundary if
2724 * necessary.
2725 */
2726 if (img_offset + length > rbd_dev->parent_overlap) {
2727 rbd_assert(img_offset < rbd_dev->parent_overlap);
2728 length = rbd_dev->parent_overlap - img_offset;
2729 }
2730
2731 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002732 * Allocate a page array big enough to receive the data read
2733 * from the parent.
2734 */
2735 page_count = (u32)calc_pages_for(0, length);
2736 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2737 if (IS_ERR(pages)) {
2738 result = PTR_ERR(pages);
2739 pages = NULL;
2740 goto out_err;
2741 }
2742
2743 result = -ENOMEM;
Alex Eldere93f3152013-05-08 22:50:04 -05002744 parent_request = rbd_parent_request_create(obj_request,
2745 img_offset, length);
Alex Elder3d7efd12013-04-19 15:34:50 -05002746 if (!parent_request)
2747 goto out_err;
Alex Elder3d7efd12013-04-19 15:34:50 -05002748
2749 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2750 if (result)
2751 goto out_err;
2752 parent_request->copyup_pages = pages;
Alex Elderebda6402013-05-10 16:29:22 -05002753 parent_request->copyup_page_count = page_count;
Alex Elder3d7efd12013-04-19 15:34:50 -05002754
2755 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2756 result = rbd_img_request_submit(parent_request);
2757 if (!result)
2758 return 0;
2759
2760 parent_request->copyup_pages = NULL;
Alex Elderebda6402013-05-10 16:29:22 -05002761 parent_request->copyup_page_count = 0;
Alex Elder3d7efd12013-04-19 15:34:50 -05002762 parent_request->obj_request = NULL;
2763 rbd_obj_request_put(obj_request);
2764out_err:
2765 if (pages)
2766 ceph_release_page_vector(pages, page_count);
2767 if (parent_request)
2768 rbd_img_request_put(parent_request);
2769 obj_request->result = result;
2770 obj_request->xferred = 0;
2771 obj_request_done_set(obj_request);
2772
2773 return result;
2774}
2775
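/*
 * Callback for the existence check issued by
 * rbd_img_obj_exists_submit().  Records whether the target object
 * exists based on the result of the STAT op (-ENOENT means it does
 * not), then resubmits the original object request.
 */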
Alex Elderc5b5ef62013-02-11 12:33:24 -06002776static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2777{
Alex Elderc5b5ef62013-02-11 12:33:24 -06002778 struct rbd_obj_request *orig_request;
Alex Elder638f5ab2013-05-06 17:40:33 -05002779 struct rbd_device *rbd_dev;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002780 int result;
2781
2782 rbd_assert(!obj_request_img_data_test(obj_request));
2783
2784 /*
2785 * All we need from the object request is the original
2786 * request and the result of the STAT op. Grab those, then
2787 * we're done with the request.
2788 */
2789 orig_request = obj_request->obj_request;
2790 obj_request->obj_request = NULL;
Alex Elder912c3172013-05-13 20:35:38 -05002791 rbd_obj_request_put(orig_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002792 rbd_assert(orig_request);
2793 rbd_assert(orig_request->img_request);
2794
2795 result = obj_request->result;
2796 obj_request->result = 0;
2797
2798 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2799 obj_request, orig_request, result,
2800 obj_request->xferred, obj_request->length);
2801 rbd_obj_request_put(obj_request);
2802
Alex Elder638f5ab2013-05-06 17:40:33 -05002803 /*
2804 * If the overlap has become 0 (most likely because the
 2805	 * image has been flattened) we need to re-submit the
 2806	 * original write request.
2807 */
2808 rbd_dev = orig_request->img_request->rbd_dev;
2809 if (!rbd_dev->parent_overlap) {
2810 struct ceph_osd_client *osdc;
2811
Alex Elder638f5ab2013-05-06 17:40:33 -05002812 osdc = &rbd_dev->rbd_client->client->osdc;
2813 result = rbd_obj_request_submit(osdc, orig_request);
2814 if (!result)
2815 return;
2816 }
Alex Elderc5b5ef62013-02-11 12:33:24 -06002817
2818 /*
2819 * Our only purpose here is to determine whether the object
2820 * exists, and we don't want to treat the non-existence as
2821 * an error. If something else comes back, transfer the
2822 * error to the original request and complete it now.
2823 */
2824 if (!result) {
2825 obj_request_existence_set(orig_request, true);
2826 } else if (result == -ENOENT) {
2827 obj_request_existence_set(orig_request, false);
2828 } else if (result) {
2829 orig_request->result = result;
Alex Elder3d7efd12013-04-19 15:34:50 -05002830 goto out;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002831 }
2832
2833 /*
2834 * Resubmit the original request now that we have recorded
2835 * whether the target object exists.
2836 */
Alex Elderb454e362013-04-19 15:34:50 -05002837 orig_request->result = rbd_img_obj_request_submit(orig_request);
Alex Elder3d7efd12013-04-19 15:34:50 -05002838out:
Alex Elderc5b5ef62013-02-11 12:33:24 -06002839 if (orig_request->result)
2840 rbd_obj_request_complete(orig_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002841}
2842
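/*
 * Issue a STAT op against the target object of a layered write so
 * we can find out whether it exists.  The result is handled by
 * rbd_img_obj_exists_callback().
 */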
2843static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2844{
2845 struct rbd_obj_request *stat_request;
2846 struct rbd_device *rbd_dev;
2847 struct ceph_osd_client *osdc;
2848 struct page **pages = NULL;
2849 u32 page_count;
2850 size_t size;
2851 int ret;
2852
2853 /*
2854 * The response data for a STAT call consists of:
2855 * le64 length;
2856 * struct {
2857 * le32 tv_sec;
2858 * le32 tv_nsec;
2859 * } mtime;
2860 */
2861 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2862 page_count = (u32)calc_pages_for(0, size);
2863 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2864 if (IS_ERR(pages))
2865 return PTR_ERR(pages);
2866
2867 ret = -ENOMEM;
2868 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2869 OBJ_REQUEST_PAGES);
2870 if (!stat_request)
2871 goto out;
2872
2873 rbd_obj_request_get(obj_request);
2874 stat_request->obj_request = obj_request;
2875 stat_request->pages = pages;
2876 stat_request->page_count = page_count;
2877
2878 rbd_assert(obj_request->img_request);
2879 rbd_dev = obj_request->img_request->rbd_dev;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08002880 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02002881 stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002882 if (!stat_request->osd_req)
2883 goto out;
2884 stat_request->callback = rbd_img_obj_exists_callback;
2885
Yan, Zheng144cba12015-04-27 11:09:54 +08002886 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002887 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2888 false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05002889 rbd_osd_req_format_read(stat_request);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002890
2891 osdc = &rbd_dev->rbd_client->client->osdc;
2892 ret = rbd_obj_request_submit(osdc, stat_request);
2893out:
2894 if (ret)
2895 rbd_obj_request_put(obj_request);
2896
2897 return ret;
2898}
2899
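/*
 * Return true if the given object request requires no layered
 * (copyup) handling and can be submitted directly to the osd.
 */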
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002900static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
Alex Elderb454e362013-04-19 15:34:50 -05002901{
2902 struct rbd_img_request *img_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002903 struct rbd_device *rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002904
2905 rbd_assert(obj_request_img_data_test(obj_request));
2906
2907 img_request = obj_request->img_request;
2908 rbd_assert(img_request);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002909 rbd_dev = img_request->rbd_dev;
Alex Elderb454e362013-04-19 15:34:50 -05002910
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002911 /* Reads */
Josh Durgin1c220882014-04-04 17:49:12 -07002912 if (!img_request_write_test(img_request) &&
2913 !img_request_discard_test(img_request))
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002914 return true;
Alex Elderb454e362013-04-19 15:34:50 -05002915
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002916 /* Non-layered writes */
2917 if (!img_request_layered_test(img_request))
2918 return true;
2919
2920 /*
2921 * Layered writes outside of the parent overlap range don't
2922 * share any data with the parent.
2923 */
2924 if (!obj_request_overlaps_parent(obj_request))
2925 return true;
2926
2927 /*
Guangliang Zhaoc622d222014-04-01 22:22:15 +08002928 * Entire-object layered writes - we will overwrite whatever
2929 * parent data there is anyway.
2930 */
2931 if (!obj_request->offset &&
2932 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2933 return true;
2934
2935 /*
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002936 * If the object is known to already exist, its parent data has
2937 * already been copied.
2938 */
2939 if (obj_request_known_test(obj_request) &&
2940 obj_request_exists_test(obj_request))
2941 return true;
2942
2943 return false;
2944}
2945
2946static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2947{
2948 if (img_obj_request_simple(obj_request)) {
Alex Elderb454e362013-04-19 15:34:50 -05002949 struct rbd_device *rbd_dev;
2950 struct ceph_osd_client *osdc;
2951
2952 rbd_dev = obj_request->img_request->rbd_dev;
2953 osdc = &rbd_dev->rbd_client->client->osdc;
2954
2955 return rbd_obj_request_submit(osdc, obj_request);
2956 }
2957
2958 /*
Alex Elder3d7efd12013-04-19 15:34:50 -05002959 * It's a layered write. The target object might exist but
2960 * we may not know that yet. If we know it doesn't exist,
2961 * start by reading the data for the full target object from
2962 * the parent so we can use it for a copyup to the target.
Alex Elderb454e362013-04-19 15:34:50 -05002963 */
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002964 if (obj_request_known_test(obj_request))
Alex Elder3d7efd12013-04-19 15:34:50 -05002965 return rbd_img_obj_parent_read_full(obj_request);
2966
2967 /* We don't know whether the target exists. Go find out. */
Alex Elderb454e362013-04-19 15:34:50 -05002968
2969 return rbd_img_obj_exists_submit(obj_request);
2970}
2971
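/* Submit each object request that makes up the given image request. */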
Alex Elderbf0d5f502012-11-22 00:00:08 -06002972static int rbd_img_request_submit(struct rbd_img_request *img_request)
2973{
Alex Elderbf0d5f502012-11-22 00:00:08 -06002974 struct rbd_obj_request *obj_request;
Alex Elder46faeed2013-04-10 17:47:46 -05002975 struct rbd_obj_request *next_obj_request;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002976
Alex Elder37206ee2013-02-20 17:32:08 -06002977 dout("%s: img %p\n", __func__, img_request);
Alex Elder46faeed2013-04-10 17:47:46 -05002978 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
Alex Elderbf0d5f502012-11-22 00:00:08 -06002979 int ret;
2980
Alex Elderb454e362013-04-19 15:34:50 -05002981 ret = rbd_img_obj_request_submit(obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002982 if (ret)
2983 return ret;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002984 }
2985
2986 return 0;
2987}
2988
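/*
 * Callback invoked when a read from the parent image, issued by
 * rbd_img_parent_read(), completes.  Propagates the result to the
 * original child object request, arranging for anything beyond the
 * parent overlap boundary to be zeroed, and completes it.
 */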
Alex Elder8b3e1a52013-01-24 16:13:36 -06002989static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2990{
2991 struct rbd_obj_request *obj_request;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05002992 struct rbd_device *rbd_dev;
2993 u64 obj_end;
Alex Elder02c74fb2013-05-06 17:40:33 -05002994 u64 img_xferred;
2995 int img_result;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002996
2997 rbd_assert(img_request_child_test(img_request));
2998
Alex Elder02c74fb2013-05-06 17:40:33 -05002999 /* First get what we need from the image request and release it */
3000
Alex Elder8b3e1a52013-01-24 16:13:36 -06003001 obj_request = img_request->obj_request;
Alex Elder02c74fb2013-05-06 17:40:33 -05003002 img_xferred = img_request->xferred;
3003 img_result = img_request->result;
3004 rbd_img_request_put(img_request);
3005
3006 /*
3007 * If the overlap has become 0 (most likely because the
3008 * image has been flattened) we need to re-submit the
3009 * original request.
3010 */
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003011 rbd_assert(obj_request);
3012 rbd_assert(obj_request->img_request);
Alex Elder02c74fb2013-05-06 17:40:33 -05003013 rbd_dev = obj_request->img_request->rbd_dev;
3014 if (!rbd_dev->parent_overlap) {
3015 struct ceph_osd_client *osdc;
Alex Elder8b3e1a52013-01-24 16:13:36 -06003016
Alex Elder02c74fb2013-05-06 17:40:33 -05003017 osdc = &rbd_dev->rbd_client->client->osdc;
3018 img_result = rbd_obj_request_submit(osdc, obj_request);
3019 if (!img_result)
3020 return;
3021 }
3022
3023 obj_request->result = img_result;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003024 if (obj_request->result)
3025 goto out;
3026
3027 /*
3028 * We need to zero anything beyond the parent overlap
3029 * boundary. Since rbd_img_obj_request_read_callback()
3030 * will zero anything beyond the end of a short read, an
3031 * easy way to do this is to pretend the data from the
3032 * parent came up short--ending at the overlap boundary.
3033 */
3034 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3035 obj_end = obj_request->img_offset + obj_request->length;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003036 if (obj_end > rbd_dev->parent_overlap) {
3037 u64 xferred = 0;
3038
3039 if (obj_request->img_offset < rbd_dev->parent_overlap)
3040 xferred = rbd_dev->parent_overlap -
3041 obj_request->img_offset;
3042
Alex Elder02c74fb2013-05-06 17:40:33 -05003043 obj_request->xferred = min(img_xferred, xferred);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003044 } else {
Alex Elder02c74fb2013-05-06 17:40:33 -05003045 obj_request->xferred = img_xferred;
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003046 }
3047out:
Alex Elder8b3e1a52013-01-24 16:13:36 -06003048 rbd_img_obj_request_read_callback(obj_request);
3049 rbd_obj_request_complete(obj_request);
3050}
3051
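/*
 * Satisfy an object read that returned -ENOENT by reading the
 * corresponding range from the parent image.
 */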
3052static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3053{
Alex Elder8b3e1a52013-01-24 16:13:36 -06003054 struct rbd_img_request *img_request;
3055 int result;
3056
3057 rbd_assert(obj_request_img_data_test(obj_request));
3058 rbd_assert(obj_request->img_request != NULL);
3059 rbd_assert(obj_request->result == (s32) -ENOENT);
Alex Elder5b2ab722013-05-06 17:40:33 -05003060 rbd_assert(obj_request_type_valid(obj_request->type));
Alex Elder8b3e1a52013-01-24 16:13:36 -06003061
Alex Elder8b3e1a52013-01-24 16:13:36 -06003062 /* rbd_read_finish(obj_request, obj_request->length); */
Alex Eldere93f3152013-05-08 22:50:04 -05003063 img_request = rbd_parent_request_create(obj_request,
Alex Elder8b3e1a52013-01-24 16:13:36 -06003064 obj_request->img_offset,
Alex Eldere93f3152013-05-08 22:50:04 -05003065 obj_request->length);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003066 result = -ENOMEM;
3067 if (!img_request)
3068 goto out_err;
3069
Alex Elder5b2ab722013-05-06 17:40:33 -05003070 if (obj_request->type == OBJ_REQUEST_BIO)
3071 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3072 obj_request->bio_list);
3073 else
3074 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3075 obj_request->pages);
Alex Elder8b3e1a52013-01-24 16:13:36 -06003076 if (result)
3077 goto out_err;
3078
3079 img_request->callback = rbd_img_parent_read_callback;
3080 result = rbd_img_request_submit(img_request);
3081 if (result)
3082 goto out_err;
3083
3084 return;
3085out_err:
3086 if (img_request)
3087 rbd_img_request_put(img_request);
3088 obj_request->result = result;
3089 obj_request->xferred = 0;
3090 obj_request_done_set(obj_request);
3091}
3092
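/* Acknowledge a notify received on the header object, synchronously. */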
Josh Durgin20e0af62013-08-29 17:36:03 -07003093static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
Alex Elderb8d70032012-11-30 17:53:04 -06003094{
3095 struct rbd_obj_request *obj_request;
Alex Elder21692382013-04-05 01:27:12 -05003096 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elderb8d70032012-11-30 17:53:04 -06003097 int ret;
3098
3099 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3100 OBJ_REQUEST_NODATA);
3101 if (!obj_request)
3102 return -ENOMEM;
3103
3104 ret = -ENOMEM;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003105 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003106 obj_request);
Alex Elderb8d70032012-11-30 17:53:04 -06003107 if (!obj_request->osd_req)
3108 goto out;
3109
Alex Elderc99d2d42013-04-05 01:27:11 -05003110 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003111 notify_id, 0, 0);
Alex Elder9d4df012013-04-19 15:34:50 -05003112 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003113
Alex Elderb8d70032012-11-30 17:53:04 -06003114 ret = rbd_obj_request_submit(osdc, obj_request);
Alex Eldercf81b602013-01-17 12:18:46 -06003115 if (ret)
Josh Durgin20e0af62013-08-29 17:36:03 -07003116 goto out;
3117 ret = rbd_obj_request_wait(obj_request);
3118out:
3119 rbd_obj_request_put(obj_request);
Alex Elderb8d70032012-11-30 17:53:04 -06003120
3121 return ret;
3122}
3123
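/*
 * Callback invoked when a watch on the header object fires.
 * Refreshes the device header and acknowledges the notify.
 */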
3124static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3125{
3126 struct rbd_device *rbd_dev = (struct rbd_device *)data;
Alex Eldere627db02013-05-06 07:40:30 -05003127 int ret;
Alex Elderb8d70032012-11-30 17:53:04 -06003128
Alex Elder37206ee2013-02-20 17:32:08 -06003129 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003130 rbd_dev->header_name, (unsigned long long)notify_id,
3131 (unsigned int)opcode);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003132
3133 /*
3134 * Until adequate refresh error handling is in place, there is
3135 * not much we can do here, except warn.
3136 *
3137 * See http://tracker.ceph.com/issues/5040
3138 */
Alex Eldere627db02013-05-06 07:40:30 -05003139 ret = rbd_dev_refresh(rbd_dev);
3140 if (ret)
Ilya Dryomov9584d502014-07-11 12:11:20 +04003141 rbd_warn(rbd_dev, "refresh failed: %d", ret);
Alex Elderb8d70032012-11-30 17:53:04 -06003142
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003143 ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
3144 if (ret)
Ilya Dryomov9584d502014-07-11 12:11:20 +04003145 rbd_warn(rbd_dev, "notify_ack ret %d", ret);
Alex Elderb8d70032012-11-30 17:53:04 -06003146}
3147
Alex Elder9969ebc2013-01-18 12:31:10 -06003148/*
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003149 * Send a (un)watch request and wait for the ack.  Return a request
 3150 * with a ref held on success, or an ERR_PTR() on error.
3151 */
3152static struct rbd_obj_request *rbd_obj_watch_request_helper(
3153 struct rbd_device *rbd_dev,
3154 bool watch)
3155{
3156 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03003157 struct ceph_options *opts = osdc->client->options;
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003158 struct rbd_obj_request *obj_request;
3159 int ret;
3160
3161 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3162 OBJ_REQUEST_NODATA);
3163 if (!obj_request)
3164 return ERR_PTR(-ENOMEM);
3165
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003166 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003167 obj_request);
3168 if (!obj_request->osd_req) {
3169 ret = -ENOMEM;
3170 goto out;
3171 }
3172
3173 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3174 rbd_dev->watch_event->cookie, 0, watch);
3175 rbd_osd_req_format_write(obj_request);
3176
3177 if (watch)
3178 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
3179
3180 ret = rbd_obj_request_submit(osdc, obj_request);
3181 if (ret)
3182 goto out;
3183
Ilya Dryomov2894e1d2015-05-12 19:53:24 +03003184 ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04003185 if (ret)
3186 goto out;
3187
3188 ret = obj_request->result;
3189 if (ret) {
3190 if (watch)
3191 rbd_obj_request_end(obj_request);
3192 goto out;
3193 }
3194
3195 return obj_request;
3196
3197out:
3198 rbd_obj_request_put(obj_request);
3199 return ERR_PTR(ret);
3200}
3201
3202/*
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003203 * Initiate a watch request, synchronously.
Alex Elder9969ebc2013-01-18 12:31:10 -06003204 */
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003205static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
Alex Elder9969ebc2013-01-18 12:31:10 -06003206{
3207 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3208 struct rbd_obj_request *obj_request;
Alex Elder9969ebc2013-01-18 12:31:10 -06003209 int ret;
3210
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003211 rbd_assert(!rbd_dev->watch_event);
3212 rbd_assert(!rbd_dev->watch_request);
Alex Elder9969ebc2013-01-18 12:31:10 -06003213
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003214 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
3215 &rbd_dev->watch_event);
3216 if (ret < 0)
3217 return ret;
Alex Elder9969ebc2013-01-18 12:31:10 -06003218
Ilya Dryomov76756a52014-06-20 18:29:20 +04003219 obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
3220 if (IS_ERR(obj_request)) {
3221 ceph_osdc_cancel_event(rbd_dev->watch_event);
3222 rbd_dev->watch_event = NULL;
3223 return PTR_ERR(obj_request);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003224 }
Alex Elder9969ebc2013-01-18 12:31:10 -06003225
Alex Elder8eb87562013-01-25 17:08:55 -06003226 /*
3227 * A watch request is set to linger, so the underlying osd
3228 * request won't go away until we unregister it. We retain
3229 * a pointer to the object request during that time (in
Ilya Dryomov76756a52014-06-20 18:29:20 +04003230 * rbd_dev->watch_request), so we'll keep a reference to it.
3231 * We'll drop that reference after we've unregistered it in
3232 * rbd_dev_header_unwatch_sync().
Alex Elder8eb87562013-01-25 17:08:55 -06003233 */
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003234 rbd_dev->watch_request = obj_request;
Alex Elder8eb87562013-01-25 17:08:55 -06003235
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003236 return 0;
Alex Elder9969ebc2013-01-18 12:31:10 -06003237}
3238
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003239/*
3240 * Tear down a watch request, synchronously.
3241 */
Ilya Dryomov76756a52014-06-20 18:29:20 +04003242static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
Ilya Dryomovfca27062013-12-16 18:02:40 +02003243{
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003244 struct rbd_obj_request *obj_request;
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003245
3246 rbd_assert(rbd_dev->watch_event);
3247 rbd_assert(rbd_dev->watch_request);
3248
Ilya Dryomov76756a52014-06-20 18:29:20 +04003249 rbd_obj_request_end(rbd_dev->watch_request);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003250 rbd_obj_request_put(rbd_dev->watch_request);
3251 rbd_dev->watch_request = NULL;
3252
Ilya Dryomov76756a52014-06-20 18:29:20 +04003253 obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
3254 if (!IS_ERR(obj_request))
3255 rbd_obj_request_put(obj_request);
3256 else
3257 rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
3258 PTR_ERR(obj_request));
3259
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04003260 ceph_osdc_cancel_event(rbd_dev->watch_event);
3261 rbd_dev->watch_event = NULL;
Ilya Dryomov811c6682016-04-15 16:22:16 +02003262
3263 dout("%s flushing notifies\n", __func__);
3264 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
Ilya Dryomovfca27062013-12-16 18:02:40 +02003265}
3266
Alex Elder36be9a72013-01-19 00:30:28 -06003267/*
Alex Elderf40eb342013-04-25 15:09:42 -05003268 * Synchronous osd object method call. Returns the number of bytes
 3269	 * returned in the inbound buffer, or a negative error code.
Alex Elder36be9a72013-01-19 00:30:28 -06003270 */
3271static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3272 const char *object_name,
3273 const char *class_name,
3274 const char *method_name,
Alex Elder41579762013-04-21 12:14:45 -05003275 const void *outbound,
Alex Elder36be9a72013-01-19 00:30:28 -06003276 size_t outbound_size,
Alex Elder41579762013-04-21 12:14:45 -05003277 void *inbound,
Alex Eldere2a58ee2013-04-30 00:44:33 -05003278 size_t inbound_size)
Alex Elder36be9a72013-01-19 00:30:28 -06003279{
Alex Elder21692382013-04-05 01:27:12 -05003280 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elder36be9a72013-01-19 00:30:28 -06003281 struct rbd_obj_request *obj_request;
Alex Elder36be9a72013-01-19 00:30:28 -06003282 struct page **pages;
3283 u32 page_count;
3284 int ret;
3285
3286 /*
Alex Elder6010a452013-04-05 01:27:11 -05003287 * Method calls are ultimately read operations. The result
 3288	 * should be placed into the inbound buffer provided.  They
3289 * also supply outbound data--parameters for the object
3290 * method. Currently if this is present it will be a
3291 * snapshot id.
Alex Elder36be9a72013-01-19 00:30:28 -06003292 */
Alex Elder57385b52013-04-21 12:14:45 -05003293 page_count = (u32)calc_pages_for(0, inbound_size);
Alex Elder36be9a72013-01-19 00:30:28 -06003294 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3295 if (IS_ERR(pages))
3296 return PTR_ERR(pages);
3297
3298 ret = -ENOMEM;
Alex Elder6010a452013-04-05 01:27:11 -05003299 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
Alex Elder36be9a72013-01-19 00:30:28 -06003300 OBJ_REQUEST_PAGES);
3301 if (!obj_request)
3302 goto out;
3303
3304 obj_request->pages = pages;
3305 obj_request->page_count = page_count;
3306
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003307 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003308 obj_request);
Alex Elder36be9a72013-01-19 00:30:28 -06003309 if (!obj_request->osd_req)
3310 goto out;
3311
Alex Elderc99d2d42013-04-05 01:27:11 -05003312 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
Alex Elder04017e22013-04-05 14:46:02 -05003313 class_name, method_name);
3314 if (outbound_size) {
3315 struct ceph_pagelist *pagelist;
3316
3317 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3318 if (!pagelist)
3319 goto out;
3320
3321 ceph_pagelist_init(pagelist);
3322 ceph_pagelist_append(pagelist, outbound, outbound_size);
3323 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3324 pagelist);
3325 }
Alex Eldera4ce40a2013-04-05 01:27:12 -05003326 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3327 obj_request->pages, inbound_size,
Alex Elder44cd1882013-04-05 01:27:12 -05003328 0, false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05003329 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003330
Alex Elder36be9a72013-01-19 00:30:28 -06003331 ret = rbd_obj_request_submit(osdc, obj_request);
3332 if (ret)
3333 goto out;
3334 ret = rbd_obj_request_wait(obj_request);
3335 if (ret)
3336 goto out;
3337
3338 ret = obj_request->result;
3339 if (ret < 0)
3340 goto out;
Alex Elder57385b52013-04-21 12:14:45 -05003341
3342 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3343 ret = (int)obj_request->xferred;
Alex Elder903bb322013-02-06 13:11:38 -06003344 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
Alex Elder36be9a72013-01-19 00:30:28 -06003345out:
3346 if (obj_request)
3347 rbd_obj_request_put(obj_request);
3348 else
3349 ceph_release_page_vector(pages, page_count);
3350
3351 return ret;
3352}
3353
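/*
 * Worker function for a queued block request.  Validates the
 * request, builds an image request covering its range, fills it
 * from the request's bio chain (or with no data for a discard),
 * and submits it.
 */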
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003354static void rbd_queue_workfn(struct work_struct *work)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003355{
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003356 struct request *rq = blk_mq_rq_from_pdu(work);
3357 struct rbd_device *rbd_dev = rq->q->queuedata;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003358 struct rbd_img_request *img_request;
Josh Durgin4e752f02014-04-08 11:12:11 -07003359 struct ceph_snap_context *snapc = NULL;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003360 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3361 u64 length = blk_rq_bytes(rq);
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003362 enum obj_operation_type op_type;
Josh Durgin4e752f02014-04-08 11:12:11 -07003363 u64 mapping_size;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003364 int result;
3365
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003366 if (rq->cmd_type != REQ_TYPE_FS) {
3367 dout("%s: non-fs request type %d\n", __func__,
3368 (int) rq->cmd_type);
3369 result = -EIO;
3370 goto err;
3371 }
3372
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003373 if (rq->cmd_flags & REQ_DISCARD)
3374 op_type = OBJ_OP_DISCARD;
3375 else if (rq->cmd_flags & REQ_WRITE)
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003376 op_type = OBJ_OP_WRITE;
3377 else
3378 op_type = OBJ_OP_READ;
3379
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003380 /* Ignore/skip any zero-length requests */
3381
3382 if (!length) {
3383 dout("%s: zero-length request\n", __func__);
3384 result = 0;
3385 goto err_rq;
3386 }
3387
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003388 /* Only reads are allowed to a read-only device */
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003389
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003390 if (op_type != OBJ_OP_READ) {
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003391 if (rbd_dev->mapping.read_only) {
3392 result = -EROFS;
3393 goto err_rq;
3394 }
3395 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3396 }
3397
3398 /*
3399 * Quit early if the mapped snapshot no longer exists. It's
3400 * still possible the snapshot will have disappeared by the
3401 * time our request arrives at the osd, but there's no sense in
3402 * sending it if we already know.
3403 */
3404 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
 3405		dout("request for non-existent snapshot\n");
3406 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3407 result = -ENXIO;
3408 goto err_rq;
3409 }
3410
3411 if (offset && length > U64_MAX - offset + 1) {
3412 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3413 length);
3414 result = -EINVAL;
3415 goto err_rq; /* Shouldn't happen */
3416 }
3417
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003418 blk_mq_start_request(rq);
3419
Josh Durgin4e752f02014-04-08 11:12:11 -07003420 down_read(&rbd_dev->header_rwsem);
3421 mapping_size = rbd_dev->mapping.size;
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003422 if (op_type != OBJ_OP_READ) {
Josh Durgin4e752f02014-04-08 11:12:11 -07003423 snapc = rbd_dev->header.snapc;
3424 ceph_get_snap_context(snapc);
3425 }
3426 up_read(&rbd_dev->header_rwsem);
3427
3428 if (offset + length > mapping_size) {
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003429 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
Josh Durgin4e752f02014-04-08 11:12:11 -07003430 length, mapping_size);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003431 result = -EIO;
3432 goto err_rq;
3433 }
3434
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003435 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
Josh Durgin4e752f02014-04-08 11:12:11 -07003436 snapc);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003437 if (!img_request) {
3438 result = -ENOMEM;
3439 goto err_rq;
3440 }
3441 img_request->rq = rq;
Ilya Dryomov70b16db2015-11-27 19:23:24 +01003442 snapc = NULL; /* img_request consumes a ref */
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003443
Guangliang Zhao90e98c52014-04-01 22:22:16 +08003444 if (op_type == OBJ_OP_DISCARD)
3445 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
3446 NULL);
3447 else
3448 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3449 rq->bio);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003450 if (result)
3451 goto err_img_request;
3452
3453 result = rbd_img_request_submit(img_request);
3454 if (result)
3455 goto err_img_request;
3456
3457 return;
3458
3459err_img_request:
3460 rbd_img_request_put(img_request);
3461err_rq:
3462 if (result)
3463 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003464 obj_op_name(op_type), length, offset, result);
SF Markus Elfringe96a6502014-11-02 15:20:59 +01003465 ceph_put_snap_context(snapc);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003466err:
3467 blk_mq_end_request(rq, result);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003468}
3469
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003470static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3471 const struct blk_mq_queue_data *bd)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003472{
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003473 struct request *rq = bd->rq;
3474 struct work_struct *work = blk_mq_rq_to_pdu(rq);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04003475
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003476 queue_work(rbd_wq, work);
3477 return BLK_MQ_RQ_QUEUE_OK;
Alex Elderbf0d5f502012-11-22 00:00:08 -06003478}
3479
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003480static void rbd_free_disk(struct rbd_device *rbd_dev)
3481{
3482 struct gendisk *disk = rbd_dev->disk;
3483
3484 if (!disk)
3485 return;
3486
Alex Eldera0cab922013-04-25 23:15:08 -05003487 rbd_dev->disk = NULL;
3488 if (disk->flags & GENHD_FL_UP) {
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003489 del_gendisk(disk);
Alex Eldera0cab922013-04-25 23:15:08 -05003490 if (disk->queue)
3491 blk_cleanup_queue(disk->queue);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003492 blk_mq_free_tag_set(&rbd_dev->tag_set);
Alex Eldera0cab922013-04-25 23:15:08 -05003493 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003494 put_disk(disk);
3495}
3496
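/*
 * Synchronously read "length" bytes starting at "offset" from the
 * named object into the supplied buffer.  Returns the number of
 * bytes read, or a negative error code.
 */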
Alex Elder788e2df2013-01-17 12:25:27 -06003497static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3498 const char *object_name,
Alex Elder7097f8d2013-04-30 00:44:33 -05003499 u64 offset, u64 length, void *buf)
Alex Elder788e2df2013-01-17 12:25:27 -06003500
3501{
Alex Elder21692382013-04-05 01:27:12 -05003502 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Alex Elder788e2df2013-01-17 12:25:27 -06003503 struct rbd_obj_request *obj_request;
Alex Elder788e2df2013-01-17 12:25:27 -06003504 struct page **pages = NULL;
3505 u32 page_count;
Alex Elder1ceae7e2013-02-06 13:11:38 -06003506 size_t size;
Alex Elder788e2df2013-01-17 12:25:27 -06003507 int ret;
3508
3509 page_count = (u32) calc_pages_for(offset, length);
3510 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3511 if (IS_ERR(pages))
Jan Karaa8d42052014-10-22 09:17:24 +02003512 return PTR_ERR(pages);
Alex Elder788e2df2013-01-17 12:25:27 -06003513
3514 ret = -ENOMEM;
3515 obj_request = rbd_obj_request_create(object_name, offset, length,
Alex Elder36be9a72013-01-19 00:30:28 -06003516 OBJ_REQUEST_PAGES);
Alex Elder788e2df2013-01-17 12:25:27 -06003517 if (!obj_request)
3518 goto out;
3519
3520 obj_request->pages = pages;
3521 obj_request->page_count = page_count;
3522
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08003523 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
Ilya Dryomovdeb236b2014-02-25 16:22:27 +02003524 obj_request);
Alex Elder788e2df2013-01-17 12:25:27 -06003525 if (!obj_request->osd_req)
3526 goto out;
3527
Alex Elderc99d2d42013-04-05 01:27:11 -05003528 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3529 offset, length, 0, 0);
Alex Elder406e2c92013-04-15 14:50:36 -05003530 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
Alex Eldera4ce40a2013-04-05 01:27:12 -05003531 obj_request->pages,
Alex Elder44cd1882013-04-05 01:27:12 -05003532 obj_request->length,
3533 obj_request->offset & ~PAGE_MASK,
3534 false, false);
Alex Elder9d4df012013-04-19 15:34:50 -05003535 rbd_osd_req_format_read(obj_request);
Alex Elder430c28c2013-04-03 21:32:51 -05003536
Alex Elder788e2df2013-01-17 12:25:27 -06003537 ret = rbd_obj_request_submit(osdc, obj_request);
3538 if (ret)
3539 goto out;
3540 ret = rbd_obj_request_wait(obj_request);
3541 if (ret)
3542 goto out;
3543
3544 ret = obj_request->result;
3545 if (ret < 0)
3546 goto out;
Alex Elder1ceae7e2013-02-06 13:11:38 -06003547
3548 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3549 size = (size_t) obj_request->xferred;
Alex Elder903bb322013-02-06 13:11:38 -06003550 ceph_copy_from_page_vector(pages, buf, 0, size);
Alex Elder7097f8d2013-04-30 00:44:33 -05003551 rbd_assert(size <= (size_t)INT_MAX);
3552 ret = (int)size;
Alex Elder788e2df2013-01-17 12:25:27 -06003553out:
3554 if (obj_request)
3555 rbd_obj_request_put(obj_request);
3556 else
3557 ceph_release_page_vector(pages, page_count);
3558
3559 return ret;
3560}
3561
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003562/*
Alex Elder662518b2013-05-06 09:51:29 -05003563 * Read the complete header for the given rbd device. On successful
3564 * return, the rbd_dev->header field will contain up-to-date
3565 * information about the image.
Alex Elder4156d992012-08-02 11:29:46 -05003566 */
Alex Elder99a41eb2013-05-06 09:51:30 -05003567static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
Alex Elder4156d992012-08-02 11:29:46 -05003568{
3569 struct rbd_image_header_ondisk *ondisk = NULL;
3570 u32 snap_count = 0;
3571 u64 names_size = 0;
3572 u32 want_count;
3573 int ret;
3574
3575 /*
3576 * The complete header will include an array of its 64-bit
3577 * snapshot ids, followed by the names of those snapshots as
3578 * a contiguous block of NUL-terminated strings. Note that
3579 * the number of snapshots could change by the time we read
3580 * it in, in which case we re-read it.
3581 */
3582 do {
3583 size_t size;
3584
3585 kfree(ondisk);
3586
3587 size = sizeof (*ondisk);
3588 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3589 size += names_size;
3590 ondisk = kmalloc(size, GFP_KERNEL);
3591 if (!ondisk)
Alex Elder662518b2013-05-06 09:51:29 -05003592 return -ENOMEM;
Alex Elder4156d992012-08-02 11:29:46 -05003593
Alex Elder788e2df2013-01-17 12:25:27 -06003594 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
Alex Elder7097f8d2013-04-30 00:44:33 -05003595 0, size, ondisk);
Alex Elder4156d992012-08-02 11:29:46 -05003596 if (ret < 0)
Alex Elder662518b2013-05-06 09:51:29 -05003597 goto out;
Alex Elderc0cd10db2013-04-26 09:43:47 -05003598 if ((size_t)ret < size) {
Alex Elder4156d992012-08-02 11:29:46 -05003599 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05003600 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3601 size, ret);
Alex Elder662518b2013-05-06 09:51:29 -05003602 goto out;
Alex Elder4156d992012-08-02 11:29:46 -05003603 }
3604 if (!rbd_dev_ondisk_valid(ondisk)) {
3605 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05003606 rbd_warn(rbd_dev, "invalid header");
Alex Elder662518b2013-05-06 09:51:29 -05003607 goto out;
Alex Elder4156d992012-08-02 11:29:46 -05003608 }
3609
3610 names_size = le64_to_cpu(ondisk->snap_names_len);
3611 want_count = snap_count;
3612 snap_count = le32_to_cpu(ondisk->snap_count);
3613 } while (snap_count != want_count);
3614
Alex Elder662518b2013-05-06 09:51:29 -05003615 ret = rbd_header_from_disk(rbd_dev, ondisk);
3616out:
Alex Elder4156d992012-08-02 11:29:46 -05003617 kfree(ondisk);
3618
Yehuda Sadehdfc56062010-11-19 14:51:04 -08003619 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003620}
3621
Alex Elder15228ed2013-05-01 12:43:03 -05003622/*
3623 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3624 * has disappeared from the (just updated) snapshot context.
3625 */
3626static void rbd_exists_validate(struct rbd_device *rbd_dev)
3627{
3628 u64 snap_id;
3629
3630 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3631 return;
3632
3633 snap_id = rbd_dev->spec->snap_id;
3634 if (snap_id == CEPH_NOSNAP)
3635 return;
3636
3637 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3638 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3639}
3640
Josh Durgin98752012013-08-29 17:26:31 -07003641static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3642{
3643 sector_t size;
Josh Durgin98752012013-08-29 17:26:31 -07003644
3645 /*
Ilya Dryomov811c6682016-04-15 16:22:16 +02003646 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
3647 * try to update its size. If REMOVING is set, updating size
3648 * is just useless work since the device can't be opened.
Josh Durgin98752012013-08-29 17:26:31 -07003649 */
Ilya Dryomov811c6682016-04-15 16:22:16 +02003650 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
3651 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
Josh Durgin98752012013-08-29 17:26:31 -07003652 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3653 dout("setting size to %llu sectors", (unsigned long long)size);
3654 set_capacity(rbd_dev->disk, size);
3655 revalidate_disk(rbd_dev->disk);
3656 }
3657}
3658
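/*
 * Re-read the image header and update the mapping to match,
 * resizing the block device if the mapped size has changed.
 */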
Alex Eldercc4a38bd2013-04-30 00:44:33 -05003659static int rbd_dev_refresh(struct rbd_device *rbd_dev)
Alex Elder1fe5e992012-07-25 09:32:41 -05003660{
Alex Eldere627db02013-05-06 07:40:30 -05003661 u64 mapping_size;
Alex Elder1fe5e992012-07-25 09:32:41 -05003662 int ret;
3663
Alex Eldercfbf6372013-05-31 17:40:45 -05003664 down_write(&rbd_dev->header_rwsem);
Alex Elder3b5cf2a2013-05-29 11:18:59 -05003665 mapping_size = rbd_dev->mapping.size;
Ilya Dryomova720ae02014-07-23 17:11:19 +04003666
3667 ret = rbd_dev_header_info(rbd_dev);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04003668 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003669 goto out;
Alex Elder15228ed2013-05-01 12:43:03 -05003670
Ilya Dryomove8f59b52014-07-24 10:42:13 +04003671 /*
3672 * If there is a parent, see if it has disappeared due to the
3673 * mapped image getting flattened.
3674 */
3675 if (rbd_dev->parent) {
3676 ret = rbd_dev_v2_parent_info(rbd_dev);
3677 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003678 goto out;
Ilya Dryomove8f59b52014-07-24 10:42:13 +04003679 }
3680
Ilya Dryomov5ff11082014-07-23 17:11:21 +04003681 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003682 rbd_dev->mapping.size = rbd_dev->header.image_size;
Ilya Dryomov5ff11082014-07-23 17:11:21 +04003683 } else {
3684 /* validate mapped snapshot's EXISTS flag */
3685 rbd_exists_validate(rbd_dev);
3686 }
Alex Elder15228ed2013-05-01 12:43:03 -05003687
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003688out:
Alex Eldercfbf6372013-05-31 17:40:45 -05003689 up_write(&rbd_dev->header_rwsem);
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003690 if (!ret && mapping_size != rbd_dev->mapping.size)
Josh Durgin98752012013-08-29 17:26:31 -07003691 rbd_dev_update_size(rbd_dev);
Alex Elder1fe5e992012-07-25 09:32:41 -05003692
Ilya Dryomov73e39e42015-01-08 20:18:22 +03003693 return ret;
Alex Elder1fe5e992012-07-25 09:32:41 -05003694}
3695
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003696static int rbd_init_request(void *data, struct request *rq,
3697 unsigned int hctx_idx, unsigned int request_idx,
3698 unsigned int numa_node)
3699{
3700 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3701
3702 INIT_WORK(work, rbd_queue_workfn);
3703 return 0;
3704}
3705
3706static struct blk_mq_ops rbd_mq_ops = {
3707 .queue_rq = rbd_queue_rq,
3708 .map_queue = blk_mq_map_queue,
3709 .init_request = rbd_init_request,
3710};
3711
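/*
 * Allocate the gendisk and set up the blk-mq request queue for
 * the given rbd device.
 */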
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003712static int rbd_init_disk(struct rbd_device *rbd_dev)
3713{
3714 struct gendisk *disk;
3715 struct request_queue *q;
Alex Elder593a9e72012-02-07 12:03:37 -06003716 u64 segment_size;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003717 int err;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003718
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003719 /* create gendisk info */
Ilya Dryomov7e513d42013-12-16 19:26:32 +02003720 disk = alloc_disk(single_major ?
3721 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3722 RBD_MINORS_PER_MAJOR);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003723 if (!disk)
Alex Elder1fcdb8a2012-08-29 17:11:06 -05003724 return -ENOMEM;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003725
Alex Elderf0f8cef2012-01-29 13:57:44 -06003726 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
Alex Elderde71a292012-07-03 16:01:19 -05003727 rbd_dev->dev_id);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003728 disk->major = rbd_dev->major;
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02003729 disk->first_minor = rbd_dev->minor;
Ilya Dryomov7e513d42013-12-16 19:26:32 +02003730 if (single_major)
3731 disk->flags |= GENHD_FL_EXT_DEVT;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003732 disk->fops = &rbd_bd_ops;
3733 disk->private_data = rbd_dev;
3734
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003735 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3736 rbd_dev->tag_set.ops = &rbd_mq_ops;
Ilya Dryomovb5584182015-06-23 16:21:19 +03003737 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003738 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
Ilya Dryomovb5584182015-06-23 16:21:19 +03003739 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003740 rbd_dev->tag_set.nr_hw_queues = 1;
3741 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3742
3743 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3744 if (err)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07003745 goto out_disk;
Josh Durgin029bcbd2011-07-22 11:35:23 -07003746
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01003747 q = blk_mq_init_queue(&rbd_dev->tag_set);
3748 if (IS_ERR(q)) {
3749 err = PTR_ERR(q);
3750 goto out_tag_set;
3751 }
3752
Ilya Dryomovd8a2c892015-03-24 16:15:17 +03003753 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
3754 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
Alex Elder593a9e72012-02-07 12:03:37 -06003755
Josh Durgin029bcbd2011-07-22 11:35:23 -07003756 /* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	q->limits.max_sectors = queue_max_hw_sectors(q);
	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	/* enable the discard support */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.discard_granularity = segment_size;
	q->limits.discard_alignment = segment_size;
	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
	q->limits.discard_zeroes_data = 1;

	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
		q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;

	disk->queue = q;

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
	put_disk(disk);
	return err;
}

/*
   sysfs
*/

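/*
 * Each mapped image gets a directory under /sys/bus/rbd/devices/<dev-id>/
 * exposing the attributes defined below (size, features, major, minor,
 * client_id, pool, pool_id, name, image_id, current_snap, parent and
 * refresh).
 */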
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		(unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
		(unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->minor);
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
			ceph_client_id(rbd_dev->rbd_client->client));
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
			(unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
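/*
 * Example output for a one-level chain (all values hypothetical):
 *
 *	pool_id 2
 *	pool_name rbd
 *	image_id 1028e9dfdfdf
 *	image_name parent-image
 *	snap_id 4
 *	snap_name base
 *	overlap 1073741824
 */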
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	ssize_t count = 0;

	if (!rbd_dev->parent)
		return sprintf(buf, "(no parent image)\n");

	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
}

static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		return ret;

	return size;
}

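/*
 * All of the attributes below are read-only except refresh, which is
 * write-only and forces the image header to be re-read, e.g.
 * (hypothetical device id):
 *
 *	# echo 1 > /sys/bus/rbd/devices/0/refresh
 */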
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_minor.attr,
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_dev_release(struct device *dev);

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_dev_release,
};

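/*
 * An rbd_spec is reference counted: rbd_spec_alloc() returns a spec
 * holding one reference, rbd_spec_get() takes an extra one, and
 * rbd_spec_put() drops one, freeing the spec (via rbd_spec_free())
 * once the count reaches zero.  rbd_spec_put(NULL) is a no-op.
 */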
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}

static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	bool need_put = !!rbd_dev->opts;

	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev->opts);
	kfree(rbd_dev);

	/*
	 * This is racy, but way better than putting module outside of
	 * the release callback.  The race window is pretty small, so
	 * doing something similar to dm (dm-builtin.c) is overkill.
	 */
	if (need_put)
		module_put(THIS_MODULE);
}

static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
					 struct rbd_spec *spec,
					 struct rbd_options *opts)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	rbd_dev->flags = 0;
	atomic_set(&rbd_dev->parent_ref, 0);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->dev.bus = &rbd_bus_type;
	rbd_dev->dev.type = &rbd_device_type;
	rbd_dev->dev.parent = &rbd_root_dev;
	device_initialize(&rbd_dev->dev);

	rbd_dev->rbd_client = rbdc;
	rbd_dev->spec = spec;
	rbd_dev->opts = opts;

	/* Initialize the layout used for all rbd requests */

	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

	/*
	 * If this is a mapping rbd_dev (as opposed to a parent one),
	 * pin our module.  We have a ref from do_rbd_add(), so use
	 * __module_get().
	 */
	if (rbd_dev->opts)
		__module_get(THIS_MODULE);

	return rbd_dev;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	if (rbd_dev)
		put_device(&rbd_dev->dev);
}

/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
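/*
 * The "get_size" class method reply decoded below is a packed u8
 * object order followed by a little-endian u64 image size in bytes,
 * mirrored by size_buf.
 */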
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				&snapid, sizeof (snapid),
				&size_buf, sizeof (size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout(" order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout(" snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_size);

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}

static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_object_prefix", NULL, 0,
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}

static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 unsup;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_features",
				&snapid, sizeof (snapid),
				&features_buf, sizeof (features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
	if (unsup) {
		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
			 unsup);
		return -ENXIO;
	}

	*snap_features = le64_to_cpu(features_buf.features);

	dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}

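/*
 * The "get_parent" reply decoded below consists of, in order: a
 * little-endian u64 pool id, a length-prefixed image id string, a
 * little-endian u64 snapshot id and a little-endian u64 overlap
 * (in bytes).
 */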
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	u64 pool_id;
	char *image_id;
	u64 snap_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_parent",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, pool_id, out_err);
	if (pool_id == CEPH_NOPOOL) {
		/*
		 * Either the parent never existed, or we have
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
			(unsigned long long)pool_id, U32_MAX);
		goto out_err;
	}

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	ceph_decode_64_safe(&p, end, snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	/*
	 * The parent won't change (except when the clone is
	 * flattened, which is handled above).  So we only need
	 * to record the parent spec if we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pool_id;
		parent_spec->image_id = image_id;
		parent_spec->snap_id = snap_id;
		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
	} else {
		kfree(image_id);
	}

	/*
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
	 */
	if (!overlap) {
		if (parent_spec) {
			/* refresh, careful to warn just once */
			if (rbd_dev->parent_overlap)
				rbd_warn(rbd_dev,
				    "clone now standalone (overlap became 0)");
		} else {
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
		}
	}
	rbd_dev->parent_overlap = overlap;

out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}

static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_stripe_unit_count", NULL, 0,
				(char *)&striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	ret = -EINVAL;
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}

static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				"rbd", "dir_get_name",
				image_id, image_id_size,
				reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}

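/*
 * Format 1 images keep their snapshot names in a single buffer of
 * NUL-terminated strings (header.snap_names), parallel to the ids in
 * header.snapc->snaps[], so the lookup below walks both in step.
 */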
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}

static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u32 which;
	bool found = false;
	u64 snap_id;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}

/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;

		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}

/*
 * A parent image will have all ids but none of the names.
 *
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Fetch the snapshot name */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;

out_err:
	kfree(image_name);
	kfree(pool_name);
	return ret;
}

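/*
 * Fetch and decode the image's snapshot context (the "seq" value,
 * i.e. the maximum snapshot id, plus the ids of all snapshots) and
 * install it in the header, dropping any previous snapshot context.
 */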
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext", NULL, 0,
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}

static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout(" snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}

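/*
 * Re-read the mutable parts of a v2 image header (image size and
 * snapshot context).  On the first call for an image this also
 * fetches the immutable parts (object prefix, features, striping
 * parameters) via rbd_dev_v2_header_onetime().
 */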
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret && first_time) {
		kfree(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	}

	return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}

/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.
 */
static int rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	int new_dev_id;

	new_dev_id = ida_simple_get(&rbd_dev_id_ida,
				    0, minor_to_rbd_dev_id(1 << MINORBITS),
				    GFP_KERNEL);
	if (new_dev_id < 0)
		return new_dev_id;

	rbd_dev->dev_id = new_dev_id;

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);

	return 0;
}

/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);

	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);

	dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
}

/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}

/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}

/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *	The address of a pointer that will refer to a ceph options
 *	structure.  Caller must release the returned pointer using
 *	ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * where:
 *  <mon_addrs>
 *	A comma-separated list of one or more monitor addresses.
 *	A monitor address is an ip address, optionally followed
 *	by a port number (separated by a colon).
 *	I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *	A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *	The name of the rados pool containing the rbd image.
 *  <image_name>
 *	The name of the image in that pool to map.
 *  <snap_name>
 *	An optional snapshot name.  If provided, the mapping will
 *	present data from the image at the time that snapshot was
 *	created.  The image head is used if no snapshot name is
 *	provided.  Snapshot mappings are always read-only.
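 *
 * For example (all values hypothetical):
 *	1.2.3.4:6789 name=admin rbd myimage mysnap
 * maps snapshot "mysnap" of image "myimage" in pool "rbd"; passing
 * "-" instead of "mysnap" (or omitting it) maps the image head.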
Alex Eldera725f65e2012-02-02 08:13:30 -06004858 */
Alex Elder859c31d2012-10-25 23:34:42 -05004859static int rbd_add_parse_args(const char *buf,
Alex Elderdc79b112012-10-25 23:34:41 -05004860 struct ceph_options **ceph_opts,
Alex Elder859c31d2012-10-25 23:34:42 -05004861 struct rbd_options **opts,
4862 struct rbd_spec **rbd_spec)
Alex Eldera725f65e2012-02-02 08:13:30 -06004863{
Alex Elderd22f76e2012-07-12 10:46:35 -05004864 size_t len;
Alex Elder859c31d2012-10-25 23:34:42 -05004865 char *options;
Alex Elder0ddebc02012-10-25 23:34:41 -05004866 const char *mon_addrs;
Alex Elderecb4dc22013-04-26 09:43:47 -05004867 char *snap_name;
Alex Elder0ddebc02012-10-25 23:34:41 -05004868 size_t mon_addrs_size;
Alex Elder859c31d2012-10-25 23:34:42 -05004869 struct rbd_spec *spec = NULL;
Alex Elder4e9afeb2012-10-25 23:34:41 -05004870 struct rbd_options *rbd_opts = NULL;
Alex Elder859c31d2012-10-25 23:34:42 -05004871 struct ceph_options *copts;
Alex Elderdc79b112012-10-25 23:34:41 -05004872 int ret;
Alex Eldere28fff262012-02-02 08:13:30 -06004873
4874 /* The first four tokens are required */
4875
Alex Elder7ef32142012-02-02 08:13:30 -06004876 len = next_token(&buf);
Alex Elder4fb5d6712012-11-01 10:17:15 -05004877 if (!len) {
4878 rbd_warn(NULL, "no monitor address(es) provided");
4879 return -EINVAL;
4880 }
Alex Elder0ddebc02012-10-25 23:34:41 -05004881 mon_addrs = buf;
Alex Elderf28e5652012-10-25 23:34:41 -05004882 mon_addrs_size = len + 1;
Alex Elder7ef32142012-02-02 08:13:30 -06004883 buf += len;
Alex Eldera725f65e2012-02-02 08:13:30 -06004884
Alex Elderdc79b112012-10-25 23:34:41 -05004885 ret = -EINVAL;
Alex Elderf28e5652012-10-25 23:34:41 -05004886 options = dup_token(&buf, NULL);
4887 if (!options)
Alex Elderdc79b112012-10-25 23:34:41 -05004888 return -ENOMEM;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004889 if (!*options) {
4890 rbd_warn(NULL, "no options provided");
4891 goto out_err;
4892 }
Alex Eldera725f65e2012-02-02 08:13:30 -06004893
Alex Elder859c31d2012-10-25 23:34:42 -05004894 spec = rbd_spec_alloc();
4895 if (!spec)
Alex Elderf28e5652012-10-25 23:34:41 -05004896 goto out_mem;
Alex Elder859c31d2012-10-25 23:34:42 -05004897
4898 spec->pool_name = dup_token(&buf, NULL);
4899 if (!spec->pool_name)
4900 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004901 if (!*spec->pool_name) {
4902 rbd_warn(NULL, "no pool name provided");
4903 goto out_err;
4904 }
Alex Eldere28fff262012-02-02 08:13:30 -06004905
Alex Elder69e7a022012-11-01 08:39:26 -05004906 spec->image_name = dup_token(&buf, NULL);
Alex Elder859c31d2012-10-25 23:34:42 -05004907 if (!spec->image_name)
Alex Elderf28e5652012-10-25 23:34:41 -05004908 goto out_mem;
Alex Elder4fb5d6712012-11-01 10:17:15 -05004909 if (!*spec->image_name) {
4910 rbd_warn(NULL, "no image name provided");
4911 goto out_err;
4912 }
Alex Eldere28fff262012-02-02 08:13:30 -06004913
Alex Elderf28e5652012-10-25 23:34:41 -05004914 /*
4915 * Snapshot name is optional; default is to use "-"
4916 * (indicating the head/no snapshot).
4917 */
Alex Elder3feeb8942012-08-31 17:29:52 -05004918 len = next_token(&buf);
Alex Elder820a5f32012-07-09 21:04:24 -05004919 if (!len) {
Alex Elder3feeb8942012-08-31 17:29:52 -05004920 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4921 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
Alex Elderf28e5652012-10-25 23:34:41 -05004922 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
Alex Elderdc79b112012-10-25 23:34:41 -05004923 ret = -ENAMETOOLONG;
Alex Elderf28e5652012-10-25 23:34:41 -05004924 goto out_err;
Alex Elder849b4262012-07-09 21:04:24 -05004925 }
Alex Elderecb4dc22013-04-26 09:43:47 -05004926 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4927 if (!snap_name)
Alex Elderf28e5652012-10-25 23:34:41 -05004928 goto out_mem;
Alex Elderecb4dc22013-04-26 09:43:47 -05004929 *(snap_name + len) = '\0';
4930 spec->snap_name = snap_name;
Alex Eldere5c35532012-10-25 23:34:41 -05004931
Alex Elder0ddebc02012-10-25 23:34:41 -05004932 /* Initialize all rbd options to the defaults */
Alex Eldere28fff262012-02-02 08:13:30 -06004933
Alex Elder4e9afeb2012-10-25 23:34:41 -05004934 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4935 if (!rbd_opts)
4936 goto out_mem;
4937
4938 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
Ilya Dryomovb5584182015-06-23 16:21:19 +03004939 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
Alex Elderd22f76e2012-07-12 10:46:35 -05004940
Alex Elder859c31d2012-10-25 23:34:42 -05004941 copts = ceph_parse_options(options, mon_addrs,
Alex Elder0ddebc02012-10-25 23:34:41 -05004942 mon_addrs + mon_addrs_size - 1,
Alex Elder4e9afeb2012-10-25 23:34:41 -05004943 parse_rbd_opts_token, rbd_opts);
Alex Elder859c31d2012-10-25 23:34:42 -05004944 if (IS_ERR(copts)) {
4945 ret = PTR_ERR(copts);
Alex Elderdc79b112012-10-25 23:34:41 -05004946 goto out_err;
4947 }
Alex Elder859c31d2012-10-25 23:34:42 -05004948 kfree(options);
4949
4950 *ceph_opts = copts;
Alex Elder4e9afeb2012-10-25 23:34:41 -05004951 *opts = rbd_opts;
Alex Elder859c31d2012-10-25 23:34:42 -05004952 *rbd_spec = spec;
Alex Elder0ddebc02012-10-25 23:34:41 -05004953
Alex Elderdc79b112012-10-25 23:34:41 -05004954 return 0;
Alex Elderf28e5652012-10-25 23:34:41 -05004955out_mem:
Alex Elderdc79b112012-10-25 23:34:41 -05004956 ret = -ENOMEM;
Alex Elderd22f76e2012-07-12 10:46:35 -05004957out_err:
Alex Elder859c31d2012-10-25 23:34:42 -05004958 kfree(rbd_opts);
4959 rbd_spec_put(spec);
Alex Elderf28e5652012-10-25 23:34:41 -05004960 kfree(options);
Alex Elderd22f76e2012-07-12 10:46:35 -05004961
Alex Elderdc79b112012-10-25 23:34:41 -05004962 return ret;
Alex Eldera725f65e2012-02-02 08:13:30 -06004963}
4964
Alex Elder589d30e2012-07-10 20:30:11 -05004965/*
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004966 * Return pool id (>= 0) or a negative error code.
4967 */
4968static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
4969{
Ilya Dryomova319bf52015-05-15 12:02:17 +03004970 struct ceph_options *opts = rbdc->client->options;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004971 u64 newest_epoch;
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004972 int tries = 0;
4973 int ret;
4974
4975again:
4976 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
4977 if (ret == -ENOENT && tries++ < 1) {
4978 ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
4979 &newest_epoch);
4980 if (ret < 0)
4981 return ret;
4982
4983 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
4984 ceph_monc_request_next_osdmap(&rbdc->client->monc);
4985 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
Ilya Dryomova319bf52015-05-15 12:02:17 +03004986 newest_epoch,
4987 opts->mount_timeout);
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04004988 goto again;
4989 } else {
4990 /* the osdmap we have is new enough */
4991 return -ENOENT;
4992 }
4993 }
4994
4995 return ret;
4996}
4997
4998/*
Alex Elder589d30e2012-07-10 20:30:11 -05004999 * An rbd format 2 image has a unique identifier, distinct from the
5000 * name given to it by the user. Internally, that identifier is
5001 * what's used to specify the names of objects related to the image.
5002 *
5003 * A special "rbd id" object is used to map an rbd image name to its
5004 * id. If that object doesn't exist, then there is no v2 rbd image
5005 * with the supplied name.
5006 *
5007 * This function will record the given rbd_dev's image_id field if
5008 * it can be determined, and in that case will return 0. If any
5009 * errors occur a negative errno will be returned and the rbd_dev's
5010 * image_id field will be unchanged (and should be NULL).
5011 */
5012static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5013{
5014 int ret;
5015 size_t size;
5016 char *object_name;
5017 void *response;
Alex Elderc0fba362013-04-25 23:15:08 -05005018 char *image_id;
Alex Elder2f82ee52012-10-30 19:40:33 -05005019
Alex Elder589d30e2012-07-10 20:30:11 -05005020 /*
Alex Elder2c0d0a12012-10-30 19:40:33 -05005021 * When probing a parent image, the image id is already
5022 * known (and the image name likely is not). There's no
Alex Elderc0fba362013-04-25 23:15:08 -05005023 * need to fetch the image id again in this case. We
5024 * do still need to set the image format though.
Alex Elder2c0d0a12012-10-30 19:40:33 -05005025 */
Alex Elderc0fba362013-04-25 23:15:08 -05005026 if (rbd_dev->spec->image_id) {
5027 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5028
Alex Elder2c0d0a12012-10-30 19:40:33 -05005029 return 0;
Alex Elderc0fba362013-04-25 23:15:08 -05005030 }
Alex Elder2c0d0a12012-10-30 19:40:33 -05005031
5032 /*
Alex Elder589d30e2012-07-10 20:30:11 -05005033 * First, see if the format 2 image id file exists, and if
5034 * so, get the image's persistent id from it.
5035 */
Alex Elder69e7a022012-11-01 08:39:26 -05005036 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
Alex Elder589d30e2012-07-10 20:30:11 -05005037 object_name = kmalloc(size, GFP_NOIO);
5038 if (!object_name)
5039 return -ENOMEM;
Alex Elder0d7dbfc2012-10-25 23:34:41 -05005040 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
Alex Elder589d30e2012-07-10 20:30:11 -05005041 dout("rbd id object name is %s\n", object_name);
5042
5043 /* Response will be an encoded string, which includes a length */
5044
5045 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5046 response = kzalloc(size, GFP_NOIO);
5047 if (!response) {
5048 ret = -ENOMEM;
5049 goto out;
5050 }
5051
Alex Elderc0fba362013-04-25 23:15:08 -05005052 /* If it doesn't exist we'll assume it's a format 1 image */
5053
Alex Elder36be9a72013-01-19 00:30:28 -06005054 ret = rbd_obj_method_sync(rbd_dev, object_name,
Alex Elder41579762013-04-21 12:14:45 -05005055 "rbd", "get_id", NULL, 0,
Alex Eldere2a58ee2013-04-30 00:44:33 -05005056 response, RBD_IMAGE_ID_LEN_MAX);
Alex Elder36be9a72013-01-19 00:30:28 -06005057 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderc0fba362013-04-25 23:15:08 -05005058 if (ret == -ENOENT) {
5059 image_id = kstrdup("", GFP_KERNEL);
5060 ret = image_id ? 0 : -ENOMEM;
5061 if (!ret)
5062 rbd_dev->image_format = 1;
Ilya Dryomov7dd440c2014-09-11 18:49:18 +04005063 } else if (ret >= 0) {
Alex Elderc0fba362013-04-25 23:15:08 -05005064 void *p = response;
Alex Elder589d30e2012-07-10 20:30:11 -05005065
Alex Elderc0fba362013-04-25 23:15:08 -05005066 image_id = ceph_extract_encoded_string(&p, p + ret,
Alex Elder979ed482012-11-01 08:39:26 -05005067 NULL, GFP_NOIO);
Duan Jiong461f7582014-04-11 16:38:12 +08005068 ret = PTR_ERR_OR_ZERO(image_id);
Alex Elderc0fba362013-04-25 23:15:08 -05005069 if (!ret)
5070 rbd_dev->image_format = 2;
Alex Elderc0fba362013-04-25 23:15:08 -05005071 }
5072
5073 if (!ret) {
5074 rbd_dev->spec->image_id = image_id;
5075 dout("image_id is %s\n", image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05005076 }
5077out:
5078 kfree(response);
5079 kfree(object_name);
5080
5081 return ret;
5082}
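
/*
 * Illustrative sketch only: how a length-prefixed string such as the
 * "get_id" response above is laid out.  A little-endian 32-bit
 * length is followed by that many bytes of data.  The real decoding
 * (with proper bounds checking) is done by
 * ceph_extract_encoded_string(); this hypothetical helper exists
 * purely to show the format.
 */
static char *rbd_sketch_extract_string(void **p, void *end, gfp_t gfp)
{
	u32 len;
	char *str;

	if (*p + sizeof(__le32) > end)
		return ERR_PTR(-ERANGE);
	len = le32_to_cpu(*(__le32 *)*p);
	*p += sizeof(__le32);

	if (*p + len > end)
		return ERR_PTR(-ERANGE);

	str = kmalloc(len + 1, gfp);
	if (!str)
		return ERR_PTR(-ENOMEM);
	memcpy(str, *p, len);
	str[len] = '\0';
	*p += len;

	return str;
}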
5083
Alex Elder3abef3b2013-05-13 20:35:37 -05005084/*
5085 * Undo whatever state changes are made by a v1 or v2 header info
5086 * call.
5087 */
Alex Elder6fd48b32013-04-28 23:32:34 -05005088static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5089{
5090 struct rbd_image_header *header;
5091
Ilya Dryomove69b8d42015-01-19 12:06:14 +03005092 rbd_dev_parent_put(rbd_dev);
Alex Elder6fd48b32013-04-28 23:32:34 -05005093
5094 /* Free dynamic fields from the header, then zero it out */
5095
5096 header = &rbd_dev->header;
Alex Elder812164f82013-04-30 00:44:32 -05005097 ceph_put_snap_context(header->snapc);
Alex Elder6fd48b32013-04-28 23:32:34 -05005098 kfree(header->snap_sizes);
5099 kfree(header->snap_names);
5100 kfree(header->object_prefix);
5101 memset(header, 0, sizeof (*header));
5102}
5103
Alex Elder2df3fac2013-05-06 09:51:30 -05005104static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
Alex Eldera30b71b2012-07-10 20:30:11 -05005105{
5106 int ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005107
Alex Elder1e130192012-07-03 16:01:19 -05005108 ret = rbd_dev_v2_object_prefix(rbd_dev);
Alex Elder57385b52013-04-21 12:14:45 -05005109 if (ret)
Alex Elder1e130192012-07-03 16:01:19 -05005110 goto out_err;
Alex Elderb1b54022012-07-03 16:01:19 -05005111
Alex Elder2df3fac2013-05-06 09:51:30 -05005112 /*
5113 * Get and check the features for the image. Currently the
5114 * features are assumed to never change.
5115 */
Alex Elderb1b54022012-07-03 16:01:19 -05005116 ret = rbd_dev_v2_features(rbd_dev);
Alex Elder57385b52013-04-21 12:14:45 -05005117 if (ret)
Alex Elderb1b54022012-07-03 16:01:19 -05005118 goto out_err;
Alex Elder35d489f2012-07-03 16:01:19 -05005119
Alex Eldercc070d52013-04-21 12:14:45 -05005120 /* If the image supports fancy striping, get its parameters */
5121
5122 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5123 ret = rbd_dev_v2_striping_info(rbd_dev);
5124 if (ret < 0)
5125 goto out_err;
5126 }
Alex Elder2df3fac2013-05-06 09:51:30 -05005127	/* No support for crypto or compression in format 2 images */
Alex Eldera30b71b2012-07-10 20:30:11 -05005128
Alex Elder35152972012-08-31 17:29:55 -05005129 return 0;
Alex Elder9d475de2012-07-03 16:01:19 -05005130out_err:
Alex Elder642a2532013-05-06 17:40:33 -05005131 rbd_dev->header.features = 0;
Alex Elder1e130192012-07-03 16:01:19 -05005132 kfree(rbd_dev->header.object_prefix);
5133 rbd_dev->header.object_prefix = NULL;
Alex Elder9d475de2012-07-03 16:01:19 -05005134
5135 return ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005136}
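
/*
 * Illustration of the feature gate used above.  Each optional image
 * capability occupies one bit of the 64-bit features word fetched by
 * rbd_dev_v2_features(); a mask-and-test decides whether extra
 * metadata (striping parameters, parent info) needs to be fetched.
 * This helper is hypothetical; the driver open-codes the test.
 */
static bool rbd_sketch_feature_enabled(u64 features, u64 feature_bit)
{
	return (features & feature_bit) != 0;
}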
5137
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005138/*
5139 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5140 * rbd_dev_image_probe() recursion depth, which means it's also the
5141 * length of the already discovered part of the parent chain.
5142 */
5143static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
Alex Elder83a06262012-10-30 15:47:17 -05005144{
Alex Elder2f82ee52012-10-30 19:40:33 -05005145 struct rbd_device *parent = NULL;
Alex Elder124afba2013-04-26 15:44:36 -05005146 int ret;
5147
5148 if (!rbd_dev->parent_spec)
5149 return 0;
Alex Elder124afba2013-04-26 15:44:36 -05005150
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005151 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5152 pr_info("parent chain is too long (%d)\n", depth);
5153 ret = -EINVAL;
5154 goto out_err;
5155 }
5156
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005157 parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
5158 NULL);
5159 if (!parent) {
5160 ret = -ENOMEM;
Alex Elder124afba2013-04-26 15:44:36 -05005161 goto out_err;
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005162 }
5163
5164 /*
5165 * Images related by parent/child relationships always share
5166 * rbd_client and spec/parent_spec, so bump their refcounts.
5167 */
5168 __rbd_get_client(rbd_dev->rbd_client);
5169 rbd_spec_get(rbd_dev->parent_spec);
Alex Elder124afba2013-04-26 15:44:36 -05005170
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005171 ret = rbd_dev_image_probe(parent, depth);
Alex Elder124afba2013-04-26 15:44:36 -05005172 if (ret < 0)
5173 goto out_err;
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005174
Alex Elder124afba2013-04-26 15:44:36 -05005175 rbd_dev->parent = parent;
Alex Eldera2acd002013-05-08 22:50:04 -05005176 atomic_set(&rbd_dev->parent_ref, 1);
Alex Elder124afba2013-04-26 15:44:36 -05005177 return 0;
Alex Elder124afba2013-04-26 15:44:36 -05005178
Ilya Dryomov1f2c6652015-10-11 19:38:00 +02005179out_err:
5180 rbd_dev_unparent(rbd_dev);
Markus Elfring1761b222015-11-23 20:16:45 +01005181 rbd_dev_destroy(parent);
Alex Elder124afba2013-04-26 15:44:36 -05005182 return ret;
5183}
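
/*
 * Sketch of the invariant documented above: once probing completes,
 * @depth equals the number of ancestors reachable through ->parent,
 * and that count never exceeds RBD_MAX_PARENT_CHAIN_LEN.  A
 * hypothetical helper that recomputes the chain length:
 */
static int rbd_sketch_parent_chain_len(struct rbd_device *rbd_dev)
{
	int len = 0;

	while ((rbd_dev = rbd_dev->parent))
		len++;

	return len;
}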
5184
Ilya Dryomov811c6682016-04-15 16:22:16 +02005185/*
5186 * rbd_dev->header_rwsem must be locked for write and will be unlocked
5187 * upon return.
5188 */
Alex Elder200a6a82013-04-28 23:32:34 -05005189static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
Alex Elder124afba2013-04-26 15:44:36 -05005190{
Alex Elder83a06262012-10-30 15:47:17 -05005191 int ret;
Alex Elder83a06262012-10-30 15:47:17 -05005192
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02005193 /* Get an id and fill in device name. */
Alex Elder83a06262012-10-30 15:47:17 -05005194
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02005195 ret = rbd_dev_id_get(rbd_dev);
5196 if (ret)
Ilya Dryomov811c6682016-04-15 16:22:16 +02005197 goto err_out_unlock;
Ilya Dryomovf8a22fc2013-12-13 15:28:57 +02005198
Alex Elder83a06262012-10-30 15:47:17 -05005199 BUILD_BUG_ON(DEV_NAME_LEN
5200 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
5201 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
5202
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005203 /* Record our major and minor device numbers. */
Alex Elder83a06262012-10-30 15:47:17 -05005204
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005205 if (!single_major) {
5206 ret = register_blkdev(0, rbd_dev->name);
5207 if (ret < 0)
5208 goto err_out_id;
5209
5210 rbd_dev->major = ret;
5211 rbd_dev->minor = 0;
5212 } else {
5213 rbd_dev->major = rbd_major;
5214 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5215 }
Alex Elder83a06262012-10-30 15:47:17 -05005216
5217 /* Set up the blkdev mapping. */
5218
5219 ret = rbd_init_disk(rbd_dev);
5220 if (ret)
5221 goto err_out_blkdev;
5222
Alex Elderf35a4de2013-05-06 09:51:29 -05005223 ret = rbd_dev_mapping_set(rbd_dev);
Alex Elder83a06262012-10-30 15:47:17 -05005224 if (ret)
5225 goto err_out_disk;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04005226
Alex Elderf35a4de2013-05-06 09:51:29 -05005227 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
Josh Durgin22001f62013-09-30 20:10:04 -07005228 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
Alex Elderf35a4de2013-05-06 09:51:29 -05005229
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005230 dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
5231 ret = device_add(&rbd_dev->dev);
Alex Elderf35a4de2013-05-06 09:51:29 -05005232 if (ret)
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005233 goto err_out_mapping;
Alex Elder83a06262012-10-30 15:47:17 -05005234
Alex Elder83a06262012-10-30 15:47:17 -05005235 /* Everything's ready. Announce the disk to the world. */
5236
Alex Elder129b79d2013-04-26 15:44:36 -05005237 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
Ilya Dryomov811c6682016-04-15 16:22:16 +02005238 up_write(&rbd_dev->header_rwsem);
Alex Elder83a06262012-10-30 15:47:17 -05005239
Ilya Dryomov811c6682016-04-15 16:22:16 +02005240 add_disk(rbd_dev->disk);
Alex Elder83a06262012-10-30 15:47:17 -05005241 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
5242 (unsigned long long) rbd_dev->mapping.size);
5243
5244 return ret;
Alex Elder2f82ee52012-10-30 19:40:33 -05005245
Alex Elderf35a4de2013-05-06 09:51:29 -05005246err_out_mapping:
5247 rbd_dev_mapping_clear(rbd_dev);
Alex Elder83a06262012-10-30 15:47:17 -05005248err_out_disk:
5249 rbd_free_disk(rbd_dev);
5250err_out_blkdev:
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005251 if (!single_major)
5252 unregister_blkdev(rbd_dev->major, rbd_dev->name);
Alex Elder83a06262012-10-30 15:47:17 -05005253err_out_id:
5254 rbd_dev_id_put(rbd_dev);
Ilya Dryomov811c6682016-04-15 16:22:16 +02005255err_out_unlock:
5256 up_write(&rbd_dev->header_rwsem);
Alex Elder83a06262012-10-30 15:47:17 -05005257 return ret;
5258}
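
/*
 * Sketch of the id-to-minor mapping used in single-major mode above.
 * The driver reserves the low bits of the minor number for
 * partitions, so device N gets minor N << shift and its partitions
 * occupy the minors immediately after it.  The shift value below is
 * illustrative; the real one is RBD_SINGLE_MAJOR_PART_SHIFT.
 */
static int rbd_sketch_dev_id_to_minor(int dev_id)
{
	return dev_id << 4;	/* assuming 4 partition bits */
}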
5259
Alex Elder332bb122013-04-27 09:59:30 -05005260static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5261{
5262 struct rbd_spec *spec = rbd_dev->spec;
5263 size_t size;
5264
5265 /* Record the header object name for this rbd image. */
5266
5267 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5268
5269 if (rbd_dev->image_format == 1)
5270 size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
5271 else
5272 size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
5273
5274 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
5275 if (!rbd_dev->header_name)
5276 return -ENOMEM;
5277
5278 if (rbd_dev->image_format == 1)
5279 sprintf(rbd_dev->header_name, "%s%s",
5280 spec->image_name, RBD_SUFFIX);
5281 else
5282 sprintf(rbd_dev->header_name, "%s%s",
5283 RBD_HEADER_PREFIX, spec->image_id);
5284 return 0;
5285}
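
/*
 * Example header object names produced above.  RBD_SUFFIX and
 * RBD_HEADER_PREFIX come from rbd_types.h; the image name and id
 * shown are made up:
 *
 *   format 1, image "foo"              -> "foo.rbd"
 *   format 2, image id "1021d2ae8944a" -> "rbd_header.1021d2ae8944a"
 */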
5286
Alex Elder200a6a82013-04-28 23:32:34 -05005287static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5288{
Alex Elder6fd48b32013-04-28 23:32:34 -05005289 rbd_dev_unprobe(rbd_dev);
Alex Elder200a6a82013-04-28 23:32:34 -05005290 kfree(rbd_dev->header_name);
Alex Elder6fd48b32013-04-28 23:32:34 -05005291 rbd_dev->header_name = NULL;
5292 rbd_dev->image_format = 0;
5293 kfree(rbd_dev->spec->image_id);
5294 rbd_dev->spec->image_id = NULL;
5295
Alex Elder200a6a82013-04-28 23:32:34 -05005296 rbd_dev_destroy(rbd_dev);
5297}
5298
Alex Eldera30b71b2012-07-10 20:30:11 -05005299/*
5300 * Probe for the existence of the header object for the given rbd
Alex Elder1f3ef782013-05-06 17:40:33 -05005301 * device. If this image is the one being mapped (i.e., not a
5302 * parent), initiate a watch on its header object before using that
5303 * object to get detailed information about the rbd image.
Alex Eldera30b71b2012-07-10 20:30:11 -05005304 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005305static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
Alex Eldera30b71b2012-07-10 20:30:11 -05005306{
5307 int ret;
5308
5309 /*
Alex Elder3abef3b2013-05-13 20:35:37 -05005310 * Get the id from the image id object. Unless there's an
5311 * error, rbd_dev->spec->image_id will be filled in with
5312 * a dynamically-allocated string, and rbd_dev->image_format
5313 * will be set to either 1 or 2.
Alex Eldera30b71b2012-07-10 20:30:11 -05005314 */
5315 ret = rbd_dev_image_id(rbd_dev);
5316 if (ret)
Alex Elderc0fba362013-04-25 23:15:08 -05005317 return ret;
Alex Elderc0fba362013-04-25 23:15:08 -05005318
Alex Elder332bb122013-04-27 09:59:30 -05005319 ret = rbd_dev_header_name(rbd_dev);
5320 if (ret)
5321 goto err_out_format;
5322
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005323 if (!depth) {
Ilya Dryomovfca27062013-12-16 18:02:40 +02005324 ret = rbd_dev_header_watch_sync(rbd_dev);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005325 if (ret) {
5326 if (ret == -ENOENT)
5327 pr_info("image %s/%s does not exist\n",
5328 rbd_dev->spec->pool_name,
5329 rbd_dev->spec->image_name);
Alex Elder1f3ef782013-05-06 17:40:33 -05005330 goto out_header_name;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005331 }
Alex Elder1f3ef782013-05-06 17:40:33 -05005332 }
Alex Elderb644de22013-04-27 09:59:31 -05005333
Ilya Dryomova720ae02014-07-23 17:11:19 +04005334 ret = rbd_dev_header_info(rbd_dev);
Alex Elder5655c4d2013-04-25 23:15:08 -05005335 if (ret)
Alex Elderb644de22013-04-27 09:59:31 -05005336 goto err_out_watch;
Alex Elder83a06262012-10-30 15:47:17 -05005337
Ilya Dryomov04077592014-07-23 17:11:20 +04005338 /*
5339 * If this image is the one being mapped, we have pool name and
5340 * id, image name and id, and snap name - need to fill snap id.
5341 * Otherwise this is a parent image, identified by pool, image
5342 * and snap ids - need to fill in names for those ids.
5343 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005344 if (!depth)
Ilya Dryomov04077592014-07-23 17:11:20 +04005345 ret = rbd_spec_fill_snap_id(rbd_dev);
5346 else
5347 ret = rbd_spec_fill_names(rbd_dev);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005348 if (ret) {
5349 if (ret == -ENOENT)
5350 pr_info("snap %s/%s@%s does not exist\n",
5351 rbd_dev->spec->pool_name,
5352 rbd_dev->spec->image_name,
5353 rbd_dev->spec->snap_name);
Alex Elder33dca392013-04-30 00:44:33 -05005354 goto err_out_probe;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005355 }
Alex Elder9bb81c92013-04-27 09:59:30 -05005356
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005357 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
5358 ret = rbd_dev_v2_parent_info(rbd_dev);
5359 if (ret)
5360 goto err_out_probe;
5361
5362 /*
5363 * Need to warn users if this image is the one being
5364 * mapped and has a parent.
5365 */
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005366 if (!depth && rbd_dev->parent_spec)
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005367 rbd_warn(rbd_dev,
5368 "WARNING: kernel layering is EXPERIMENTAL!");
5369 }
5370
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005371 ret = rbd_dev_probe_parent(rbd_dev, depth);
Alex Elder30d60ba2013-05-06 09:51:30 -05005372 if (ret)
5373 goto err_out_probe;
Alex Elder83a06262012-10-30 15:47:17 -05005374
Alex Elder30d60ba2013-05-06 09:51:30 -05005375 dout("discovered format %u image, header name is %s\n",
5376 rbd_dev->image_format, rbd_dev->header_name);
Alex Elder30d60ba2013-05-06 09:51:30 -05005377 return 0;
Ilya Dryomove8f59b52014-07-24 10:42:13 +04005378
Alex Elder6fd48b32013-04-28 23:32:34 -05005379err_out_probe:
5380 rbd_dev_unprobe(rbd_dev);
Alex Elderb644de22013-04-27 09:59:31 -05005381err_out_watch:
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005382 if (!depth)
Ilya Dryomovfca27062013-12-16 18:02:40 +02005383 rbd_dev_header_unwatch_sync(rbd_dev);
Alex Elder332bb122013-04-27 09:59:30 -05005384out_header_name:
5385 kfree(rbd_dev->header_name);
5386 rbd_dev->header_name = NULL;
5387err_out_format:
5388 rbd_dev->image_format = 0;
Alex Elder5655c4d2013-04-25 23:15:08 -05005389 kfree(rbd_dev->spec->image_id);
5390 rbd_dev->spec->image_id = NULL;
Alex Elder5655c4d2013-04-25 23:15:08 -05005391 return ret;
Alex Eldera30b71b2012-07-10 20:30:11 -05005392}
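
/*
 * Probe sequence summary for rbd_dev_image_probe() above; each error
 * path unwinds the earlier steps in reverse order:
 *
 *   1. rbd_dev_image_id()          - image id, format 1 vs 2
 *   2. rbd_dev_header_name()       - derive the header object name
 *   3. rbd_dev_header_watch_sync() - watch (mapped image only)
 *   4. rbd_dev_header_info()       - fetch header metadata
 *   5. rbd_spec_fill_snap_id() or rbd_spec_fill_names()
 *   6. rbd_dev_v2_parent_info()    - layering details, if enabled
 *   7. rbd_dev_probe_parent()      - recurse up the parent chain
 */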
5393
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005394static ssize_t do_rbd_add(struct bus_type *bus,
5395 const char *buf,
5396 size_t count)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005397{
Alex Eldercb8627c2012-07-09 21:04:23 -05005398 struct rbd_device *rbd_dev = NULL;
Alex Elderdc79b112012-10-25 23:34:41 -05005399 struct ceph_options *ceph_opts = NULL;
Alex Elder4e9afeb2012-10-25 23:34:41 -05005400 struct rbd_options *rbd_opts = NULL;
Alex Elder859c31d2012-10-25 23:34:42 -05005401 struct rbd_spec *spec = NULL;
Alex Elder9d3997f2012-10-25 23:34:42 -05005402 struct rbd_client *rbdc;
Alex Elder51344a32013-05-06 07:40:30 -05005403 bool read_only;
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02005404 int rc;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005405
5406 if (!try_module_get(THIS_MODULE))
5407 return -ENODEV;
5408
Alex Eldera725f65e2012-02-02 08:13:30 -06005409 /* parse add command */
Alex Elder859c31d2012-10-25 23:34:42 -05005410 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
Alex Elderdc79b112012-10-25 23:34:41 -05005411 if (rc < 0)
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005412 goto out;
Alex Eldera725f65e2012-02-02 08:13:30 -06005413
Alex Elder9d3997f2012-10-25 23:34:42 -05005414 rbdc = rbd_get_client(ceph_opts);
5415 if (IS_ERR(rbdc)) {
5416 rc = PTR_ERR(rbdc);
Alex Elder0ddebc02012-10-25 23:34:41 -05005417 goto err_out_args;
Alex Elder9d3997f2012-10-25 23:34:42 -05005418 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005419
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005420 /* pick the pool */
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04005421 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005422 if (rc < 0) {
5423 if (rc == -ENOENT)
5424 pr_info("pool %s does not exist\n", spec->pool_name);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005425 goto err_out_client;
Ilya Dryomov1fe48022015-03-05 10:47:22 +03005426 }
Alex Elderc0cd10db2013-04-26 09:43:47 -05005427 spec->pool_id = (u64)rc;
Alex Elder859c31d2012-10-25 23:34:42 -05005428
Alex Elder0903e872012-11-14 12:25:19 -06005429	/* The ceph file layout needs the pool id to fit in 32 bits */
5430
Alex Elderc0cd10db2013-04-26 09:43:47 -05005431 if (spec->pool_id > (u64)U32_MAX) {
Ilya Dryomov9584d502014-07-11 12:11:20 +04005432 rbd_warn(NULL, "pool id too large (%llu > %u)",
Alex Elderc0cd10db2013-04-26 09:43:47 -05005433 (unsigned long long)spec->pool_id, U32_MAX);
Alex Elder0903e872012-11-14 12:25:19 -06005434 rc = -EIO;
5435 goto err_out_client;
5436 }
5437
Ilya Dryomovd1475432015-06-22 13:24:48 +03005438 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02005439 if (!rbd_dev) {
5440 rc = -ENOMEM;
Alex Elderbd4ba652012-10-25 23:34:42 -05005441 goto err_out_client;
Ilya Dryomovb51c83c2015-10-15 15:38:57 +02005442 }
Alex Elderc53d5892012-10-25 23:34:42 -05005443 rbdc = NULL; /* rbd_dev now owns this */
5444 spec = NULL; /* rbd_dev now owns this */
Ilya Dryomovd1475432015-06-22 13:24:48 +03005445 rbd_opts = NULL; /* rbd_dev now owns this */
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005446
Ilya Dryomov811c6682016-04-15 16:22:16 +02005447 down_write(&rbd_dev->header_rwsem);
Ilya Dryomov6d69bb532015-10-11 19:38:00 +02005448 rc = rbd_dev_image_probe(rbd_dev, 0);
Alex Eldera30b71b2012-07-10 20:30:11 -05005449 if (rc < 0)
Alex Elderc53d5892012-10-25 23:34:42 -05005450 goto err_out_rbd_dev;
Alex Elder05fd6f62012-08-29 17:11:07 -05005451
Alex Elder7ce4eef2013-05-06 17:40:33 -05005452 /* If we are mapping a snapshot it must be marked read-only */
5453
Ilya Dryomovd1475432015-06-22 13:24:48 +03005454 read_only = rbd_dev->opts->read_only;
Alex Elder7ce4eef2013-05-06 17:40:33 -05005455 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5456 read_only = true;
5457 rbd_dev->mapping.read_only = read_only;
5458
Alex Elderb536f692013-04-28 23:32:34 -05005459 rc = rbd_dev_device_setup(rbd_dev);
Alex Elder3abef3b2013-05-13 20:35:37 -05005460 if (rc) {
Ilya Dryomove37180c2013-12-16 18:02:41 +02005461 /*
5462 * rbd_dev_header_unwatch_sync() can't be moved into
5463 * rbd_dev_image_release() without refactoring, see
5464 * commit 1f3ef78861ac.
5465 */
5466 rbd_dev_header_unwatch_sync(rbd_dev);
Alex Elder3abef3b2013-05-13 20:35:37 -05005467 rbd_dev_image_release(rbd_dev);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005468 goto out;
Alex Elder3abef3b2013-05-13 20:35:37 -05005469 }
Alex Elderb536f692013-04-28 23:32:34 -05005470
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005471 rc = count;
5472out:
5473 module_put(THIS_MODULE);
5474 return rc;
Alex Elder3abef3b2013-05-13 20:35:37 -05005475
Alex Elderc53d5892012-10-25 23:34:42 -05005476err_out_rbd_dev:
Ilya Dryomov811c6682016-04-15 16:22:16 +02005477 up_write(&rbd_dev->header_rwsem);
Alex Elderc53d5892012-10-25 23:34:42 -05005478 rbd_dev_destroy(rbd_dev);
Alex Elderbd4ba652012-10-25 23:34:42 -05005479err_out_client:
Alex Elder9d3997f2012-10-25 23:34:42 -05005480 rbd_put_client(rbdc);
Alex Elder0ddebc02012-10-25 23:34:41 -05005481err_out_args:
Alex Elder859c31d2012-10-25 23:34:42 -05005482 rbd_spec_put(spec);
Ilya Dryomovd1475432015-06-22 13:24:48 +03005483 kfree(rbd_opts);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005484 goto out;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005485}
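
/*
 * Usage example for the add path (see
 * Documentation/ABI/testing/sysfs-bus-rbd).  The monitor address,
 * key and names below are placeholders:
 *
 *   echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo -" \
 *       > /sys/bus/rbd/add
 *
 * Fields are: monitor address(es), libceph/rbd options, pool name,
 * image name, and an optional snapshot name ("-" maps the image
 * head).
 */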
5486
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005487static ssize_t rbd_add(struct bus_type *bus,
5488 const char *buf,
5489 size_t count)
5490{
5491 if (single_major)
5492 return -EINVAL;
5493
5494 return do_rbd_add(bus, buf, count);
5495}
5496
5497static ssize_t rbd_add_single_major(struct bus_type *bus,
5498 const char *buf,
5499 size_t count)
5500{
5501 return do_rbd_add(bus, buf, count);
5502}
5503
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005504static void rbd_dev_device_release(struct rbd_device *rbd_dev)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005505{
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005506 rbd_free_disk(rbd_dev);
Alex Elder200a6a82013-04-28 23:32:34 -05005507 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005508 device_del(&rbd_dev->dev);
Alex Elder6d80b132013-05-06 07:40:30 -05005509 rbd_dev_mapping_clear(rbd_dev);
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005510 if (!single_major)
5511 unregister_blkdev(rbd_dev->major, rbd_dev->name);
Alex Eldere2839302012-08-29 17:11:06 -05005512 rbd_dev_id_put(rbd_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005513}
5514
Alex Elder05a46af2013-04-26 15:44:36 -05005515static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5516{
Alex Elderad945fc2013-04-26 15:44:36 -05005517 while (rbd_dev->parent) {
Alex Elder05a46af2013-04-26 15:44:36 -05005518 struct rbd_device *first = rbd_dev;
5519 struct rbd_device *second = first->parent;
5520 struct rbd_device *third;
5521
5522 /*
5523 * Follow to the parent with no grandparent and
5524 * remove it.
5525 */
5526 while (second && (third = second->parent)) {
5527 first = second;
5528 second = third;
5529 }
Alex Elderad945fc2013-04-26 15:44:36 -05005530 rbd_assert(second);
Alex Elder8ad42cd2013-04-28 23:32:34 -05005531 rbd_dev_image_release(second);
Alex Elderad945fc2013-04-26 15:44:36 -05005532 first->parent = NULL;
5533 first->parent_overlap = 0;
5534
5535 rbd_assert(first->parent_spec);
Alex Elder05a46af2013-04-26 15:44:36 -05005536 rbd_spec_put(first->parent_spec);
5537 first->parent_spec = NULL;
Alex Elder05a46af2013-04-26 15:44:36 -05005538 }
5539}
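
/*
 * Teardown order illustration for the loop above.  Given a mapping
 * chained as
 *
 *   image -> parent -> grandparent
 *
 * each pass releases the deepest remaining ancestor first (the
 * grandparent, then the parent), so no image is ever released while
 * a descendant still references it.
 */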
5540
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005541static ssize_t do_rbd_remove(struct bus_type *bus,
5542 const char *buf,
5543 size_t count)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005544{
5545 struct rbd_device *rbd_dev = NULL;
Alex Elder751cc0e2013-05-31 15:17:01 -05005546 struct list_head *tmp;
5547 int dev_id;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005548 unsigned long ul;
Alex Elder82a442d2013-05-31 17:40:44 -05005549 bool already = false;
Alex Elder0d8189e2013-04-27 09:59:30 -05005550 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005551
Jingoo Hanbb8e0e82013-09-11 14:20:07 -07005552 ret = kstrtoul(buf, 10, &ul);
Alex Elder0d8189e2013-04-27 09:59:30 -05005553 if (ret)
5554 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005555
5556 /* convert to int; abort if we lost anything in the conversion */
Alex Elder751cc0e2013-05-31 15:17:01 -05005557 dev_id = (int)ul;
5558 if (dev_id != ul)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005559 return -EINVAL;
5560
Alex Elder751cc0e2013-05-31 15:17:01 -05005561 ret = -ENOENT;
5562 spin_lock(&rbd_dev_list_lock);
5563 list_for_each(tmp, &rbd_dev_list) {
5564 rbd_dev = list_entry(tmp, struct rbd_device, node);
5565 if (rbd_dev->dev_id == dev_id) {
5566 ret = 0;
5567 break;
5568 }
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005569 }
Alex Elder751cc0e2013-05-31 15:17:01 -05005570 if (!ret) {
5571 spin_lock_irq(&rbd_dev->lock);
5572 if (rbd_dev->open_count)
5573 ret = -EBUSY;
5574 else
Alex Elder82a442d2013-05-31 17:40:44 -05005575 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
5576 &rbd_dev->flags);
Alex Elder751cc0e2013-05-31 15:17:01 -05005577 spin_unlock_irq(&rbd_dev->lock);
5578 }
5579 spin_unlock(&rbd_dev_list_lock);
Alex Elder82a442d2013-05-31 17:40:44 -05005580 if (ret < 0 || already)
Alex Elder1ba0f1e2013-05-31 15:17:01 -05005581 return ret;
Alex Elder751cc0e2013-05-31 15:17:01 -05005582
Ilya Dryomovfca27062013-12-16 18:02:40 +02005583 rbd_dev_header_unwatch_sync(rbd_dev);
Ilya Dryomovfca27062013-12-16 18:02:40 +02005584
Josh Durgin98752012013-08-29 17:26:31 -07005585 /*
5586 * Don't free anything from rbd_dev->disk until after all
5587 * notifies are completely processed. Otherwise
5588 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
5589 * in a potential use after free of rbd_dev->disk or rbd_dev.
5590 */
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005591 rbd_dev_device_release(rbd_dev);
Alex Elder8ad42cd2013-04-28 23:32:34 -05005592 rbd_dev_image_release(rbd_dev);
Alex Elderaafb2302012-09-06 16:00:54 -05005593
Alex Elder1ba0f1e2013-05-31 15:17:01 -05005594 return count;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005595}
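
/*
 * Usage example for the remove path: the device id shown under
 * /sys/bus/rbd/devices (0 here) is written back to unmap it:
 *
 *   echo 0 > /sys/bus/rbd/remove
 *
 * The write fails with -EBUSY while the block device is still open.
 */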
5596
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005597static ssize_t rbd_remove(struct bus_type *bus,
5598 const char *buf,
5599 size_t count)
5600{
5601 if (single_major)
5602 return -EINVAL;
5603
5604 return do_rbd_remove(bus, buf, count);
5605}
5606
5607static ssize_t rbd_remove_single_major(struct bus_type *bus,
5608 const char *buf,
5609 size_t count)
5610{
5611 return do_rbd_remove(bus, buf, count);
5612}
5613
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005614/*
5615 * create control files in sysfs
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005616 * /sys/bus/rbd/...
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005617 */
5618static int rbd_sysfs_init(void)
5619{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005620 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005621
Alex Elderfed4c142012-02-07 12:03:36 -06005622 ret = device_register(&rbd_root_dev);
Alex Elder21079782012-01-24 10:08:36 -06005623 if (ret < 0)
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005624 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005625
Alex Elderfed4c142012-02-07 12:03:36 -06005626 ret = bus_register(&rbd_bus_type);
5627 if (ret < 0)
5628 device_unregister(&rbd_root_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005629
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005630 return ret;
5631}
5632
5633static void rbd_sysfs_cleanup(void)
5634{
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005635 bus_unregister(&rbd_bus_type);
Alex Elderfed4c142012-02-07 12:03:36 -06005636 device_unregister(&rbd_root_dev);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005637}
5638
Alex Elder1c2a9df2013-05-01 12:43:03 -05005639static int rbd_slab_init(void)
5640{
5641 rbd_assert(!rbd_img_request_cache);
Geliang Tang03d94402016-03-13 15:17:32 +08005642 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
Alex Elder868311b2013-05-01 12:43:03 -05005643 if (!rbd_img_request_cache)
5644 return -ENOMEM;
5645
5646 rbd_assert(!rbd_obj_request_cache);
Geliang Tang03d94402016-03-13 15:17:32 +08005647 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
Alex Elder78c2a442013-05-01 12:43:04 -05005648 if (!rbd_obj_request_cache)
5649 goto out_err;
5650
5651 rbd_assert(!rbd_segment_name_cache);
5652 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
Ilya Dryomov2d0ebc52014-01-27 17:40:18 +02005653 CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
Alex Elder78c2a442013-05-01 12:43:04 -05005654 if (rbd_segment_name_cache)
Alex Elder1c2a9df2013-05-01 12:43:03 -05005655 return 0;
Alex Elder78c2a442013-05-01 12:43:04 -05005656out_err:
Julia Lawall13bf2832015-09-13 14:15:26 +02005657 kmem_cache_destroy(rbd_obj_request_cache);
5658 rbd_obj_request_cache = NULL;
Alex Elder1c2a9df2013-05-01 12:43:03 -05005659
Alex Elder868311b2013-05-01 12:43:03 -05005660 kmem_cache_destroy(rbd_img_request_cache);
5661 rbd_img_request_cache = NULL;
5662
Alex Elder1c2a9df2013-05-01 12:43:03 -05005663 return -ENOMEM;
5664}
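
/*
 * For reference, KMEM_CACHE(rbd_img_request, 0) above is the
 * include/linux/slab.h shorthand for:
 *
 *   kmem_cache_create("rbd_img_request",
 *                     sizeof(struct rbd_img_request),
 *                     __alignof__(struct rbd_img_request), 0, NULL);
 *
 * i.e. the cache is named after the struct and naturally aligned.
 */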
5665
5666static void rbd_slab_exit(void)
5667{
Alex Elder78c2a442013-05-01 12:43:04 -05005668 rbd_assert(rbd_segment_name_cache);
5669 kmem_cache_destroy(rbd_segment_name_cache);
5670 rbd_segment_name_cache = NULL;
5671
Alex Elder868311b2013-05-01 12:43:03 -05005672 rbd_assert(rbd_obj_request_cache);
5673 kmem_cache_destroy(rbd_obj_request_cache);
5674 rbd_obj_request_cache = NULL;
5675
Alex Elder1c2a9df2013-05-01 12:43:03 -05005676 rbd_assert(rbd_img_request_cache);
5677 kmem_cache_destroy(rbd_img_request_cache);
5678 rbd_img_request_cache = NULL;
5679}
5680
Alex Eldercc344fa2013-02-19 12:25:56 -06005681static int __init rbd_init(void)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005682{
5683 int rc;
5684
Alex Elder1e32d342013-01-30 11:13:33 -06005685 if (!libceph_compatible(NULL)) {
5686 rbd_warn(NULL, "libceph incompatibility (quitting)");
Alex Elder1e32d342013-01-30 11:13:33 -06005687 return -EINVAL;
5688 }
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005689
Alex Elder1c2a9df2013-05-01 12:43:03 -05005690 rc = rbd_slab_init();
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005691 if (rc)
5692 return rc;
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005693
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005694 /*
5695 * The number of active work items is limited by the number of
Ilya Dryomovf77303b2015-04-22 18:28:13 +03005696 * rbd devices * queue depth, so leave @max_active at default.
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005697 */
5698 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
5699 if (!rbd_wq) {
5700 rc = -ENOMEM;
5701 goto err_out_slab;
5702 }
5703
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005704 if (single_major) {
5705 rbd_major = register_blkdev(0, RBD_DRV_NAME);
5706 if (rbd_major < 0) {
5707 rc = rbd_major;
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005708 goto err_out_wq;
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005709 }
5710 }
5711
Alex Elder1c2a9df2013-05-01 12:43:03 -05005712 rc = rbd_sysfs_init();
5713 if (rc)
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005714 goto err_out_blkdev;
Alex Elder1c2a9df2013-05-01 12:43:03 -05005715
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005716 if (single_major)
5717 pr_info("loaded (major %d)\n", rbd_major);
5718 else
5719 pr_info("loaded\n");
5720
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005721 return 0;
5722
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005723err_out_blkdev:
5724 if (single_major)
5725 unregister_blkdev(rbd_major, RBD_DRV_NAME);
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005726err_out_wq:
5727 destroy_workqueue(rbd_wq);
Ilya Dryomove1b4d962013-12-13 15:28:57 +02005728err_out_slab:
5729 rbd_slab_exit();
Alex Elder1c2a9df2013-05-01 12:43:03 -05005730 return rc;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005731}
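
/*
 * Note on the workqueue allocated in rbd_init() above:
 * WQ_MEM_RECLAIM guarantees a rescuer thread so queued I/O can make
 * forward progress under memory pressure, and passing 0 for
 * @max_active selects the workqueue default, as the comment in
 * rbd_init() explains.
 */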
5732
Alex Eldercc344fa2013-02-19 12:25:56 -06005733static void __exit rbd_exit(void)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005734{
Ilya Dryomovffe312c2014-05-20 15:46:04 +04005735 ida_destroy(&rbd_dev_id_ida);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005736 rbd_sysfs_cleanup();
Ilya Dryomov9b60e702013-12-13 15:28:57 +02005737 if (single_major)
5738 unregister_blkdev(rbd_major, RBD_DRV_NAME);
Ilya Dryomovf5ee37b2014-10-09 17:06:01 +04005739 destroy_workqueue(rbd_wq);
Alex Elder1c2a9df2013-05-01 12:43:03 -05005740 rbd_slab_exit();
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005741}
5742
5743module_init(rbd_init);
5744module_exit(rbd_exit);
5745
Alex Elderd552c612013-05-31 20:13:09 -05005746MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005747MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5748MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005749/* following authorship retained from original osdblk.c */
5750MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5751
Ilya Dryomov90da2582013-12-13 15:28:56 +02005752MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005753MODULE_LICENSE("GPL");