// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/iversion.h>
#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>

/* unused map expires after 5 minutes */
#define CEPH_SNAPID_MAP_TIMEOUT	(5 * 60 * HZ)

/*
 * Snapshots in ceph are driven in large part by cooperation from the
 * client.  In contrast to local file systems or file servers that
 * implement snapshots at a single point in the system, ceph's
 * distributed access to storage requires clients to help decide
 * whether a write logically occurs before or after a recently created
 * snapshot.
 *
 * This provides a perfect instantaneous client-wide snapshot.  Between
 * clients, however, snapshots may appear to be applied at slightly
 * different points in time, depending on delays in delivering the
 * snapshot notification.
 *
 * Snapshots are _not_ file system-wide.  Instead, each snapshot
 * applies to the subdirectory nested beneath some directory.  This
 * effectively divides the hierarchy into multiple "realms," where all
 * of the files contained by each realm share the same set of
 * snapshots.  An individual realm's snap set contains snapshots
 * explicitly created on that realm, as well as any snaps in its
 * parent's snap set _after_ the point at which the parent became its
 * parent (due to, say, a rename).  Similarly, snaps from prior parents
 * are included for the intervals during which they were the parent.
 *
 * The client is spared most of this detail, fortunately... it need
 * only maintain a hierarchy of realms reflecting the current
 * parent/child realm relationship, and, for each realm, an explicit
 * list of snaps inherited from prior parents.
 *
 * A snap_realm struct is maintained for every realm containing an
 * inode with an open cap in the system.  (The needed snap realm
 * information is provided by the MDS whenever a cap is issued, i.e.,
 * on open.)  A 'seq' version number is used to ensure that as realm
 * parameters change (new snapshot, new parent, etc.) the client's
 * realm hierarchy is updated.
 *
 * The realm hierarchy drives the generation of a 'snap context' for
 * each realm, which simply lists the resulting set of snaps for the
 * realm.  This is attached to any writes sent to OSDs.
 */
/*
 * Unfortunately error handling is a bit mixed here.  If we get a snap
 * update, but don't have enough memory to update our realm hierarchy,
 * it's not clear what we can do about it (besides complaining to the
 * console).
 */
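
/*
 * Worked example (hypothetical values, for illustration only): suppose
 * realm A has snaps {4, 1} and realm B became A's child at
 * parent_since 3 with its own snap {5}.  B's snap context is then
 * {5, 4} -- B's own snaps plus any parent snaps >= parent_since --
 * sorted in descending order, with seq taken from the newer of the
 * two realms.
 */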

/*
 * increase ref count for the realm
 *
 * caller must hold snap_rwsem for write.
 */
void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
			 struct ceph_snap_realm *realm)
{
	dout("get_realm %p %d -> %d\n", realm,
	     atomic_read(&realm->nref), atomic_read(&realm->nref)+1);
	/*
	 * since we _only_ increment realm refs or empty the empty
	 * list with snap_rwsem held, adjusting the empty list here is
	 * safe.  we do need to protect against concurrent empty list
	 * additions, however.
	 */
	if (atomic_inc_return(&realm->nref) == 1) {
		spin_lock(&mdsc->snap_empty_lock);
		list_del_init(&realm->empty_item);
		spin_unlock(&mdsc->snap_empty_lock);
	}
}

static void __insert_snap_realm(struct rb_root *root,
				struct ceph_snap_realm *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_snap_realm *r = NULL;

	while (*p) {
		parent = *p;
		r = rb_entry(parent, struct ceph_snap_realm, node);
		if (new->ino < r->ino)
			p = &(*p)->rb_left;
		else if (new->ino > r->ino)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
}

/*
 * create and get the realm rooted at @ino and bump its ref count.
 *
 * caller must hold snap_rwsem for write.
 */
static struct ceph_snap_realm *ceph_create_snap_realm(
	struct ceph_mds_client *mdsc,
	u64 ino)
{
	struct ceph_snap_realm *realm;

	realm = kzalloc(sizeof(*realm), GFP_NOFS);
	if (!realm)
		return ERR_PTR(-ENOMEM);

	atomic_set(&realm->nref, 1);    /* for caller */
	realm->ino = ino;
	INIT_LIST_HEAD(&realm->children);
	INIT_LIST_HEAD(&realm->child_item);
	INIT_LIST_HEAD(&realm->empty_item);
	INIT_LIST_HEAD(&realm->dirty_item);
	INIT_LIST_HEAD(&realm->inodes_with_caps);
	spin_lock_init(&realm->inodes_with_caps_lock);
	__insert_snap_realm(&mdsc->snap_realms, realm);
	mdsc->num_snap_realms++;

	dout("create_snap_realm %llx %p\n", realm->ino, realm);
	return realm;
}

/*
 * lookup the realm rooted at @ino.
 *
 * caller must hold snap_rwsem for write.
 */
static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
						   u64 ino)
{
	struct rb_node *n = mdsc->snap_realms.rb_node;
	struct ceph_snap_realm *r;

	while (n) {
		r = rb_entry(n, struct ceph_snap_realm, node);
		if (ino < r->ino)
			n = n->rb_left;
		else if (ino > r->ino)
			n = n->rb_right;
		else {
			dout("lookup_snap_realm %llx %p\n", r->ino, r);
			return r;
		}
	}
	return NULL;
}

struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
					       u64 ino)
{
	struct ceph_snap_realm *r;
	r = __lookup_snap_realm(mdsc, ino);
	if (r)
		ceph_get_snap_realm(mdsc, r);
	return r;
}

static void __put_snap_realm(struct ceph_mds_client *mdsc,
			     struct ceph_snap_realm *realm);

/*
 * called with snap_rwsem (write)
 */
static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
				 struct ceph_snap_realm *realm)
{
	dout("__destroy_snap_realm %p %llx\n", realm, realm->ino);

	rb_erase(&realm->node, &mdsc->snap_realms);
	mdsc->num_snap_realms--;

	if (realm->parent) {
		list_del_init(&realm->child_item);
		__put_snap_realm(mdsc, realm->parent);
	}

	kfree(realm->prior_parent_snaps);
	kfree(realm->snaps);
	ceph_put_snap_context(realm->cached_context);
	kfree(realm);
}

/*
 * caller holds snap_rwsem (write)
 */
static void __put_snap_realm(struct ceph_mds_client *mdsc,
			     struct ceph_snap_realm *realm)
{
	dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
	     atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
	if (atomic_dec_and_test(&realm->nref))
		__destroy_snap_realm(mdsc, realm);
}

/*
 * caller needn't hold any locks
 */
void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
			 struct ceph_snap_realm *realm)
{
	dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
	     atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
	if (!atomic_dec_and_test(&realm->nref))
		return;

	if (down_write_trylock(&mdsc->snap_rwsem)) {
		__destroy_snap_realm(mdsc, realm);
		up_write(&mdsc->snap_rwsem);
	} else {
		spin_lock(&mdsc->snap_empty_lock);
		list_add(&realm->empty_item, &mdsc->snap_empty);
		spin_unlock(&mdsc->snap_empty_lock);
	}
}

/*
 * Clean up any realms whose ref counts have dropped to zero.  Note
 * that this does not include realms that were created but not yet
 * used.
 *
 * Called under snap_rwsem (write)
 */
static void __cleanup_empty_realms(struct ceph_mds_client *mdsc)
{
	struct ceph_snap_realm *realm;

	spin_lock(&mdsc->snap_empty_lock);
	while (!list_empty(&mdsc->snap_empty)) {
		realm = list_first_entry(&mdsc->snap_empty,
					 struct ceph_snap_realm, empty_item);
		list_del(&realm->empty_item);
		spin_unlock(&mdsc->snap_empty_lock);
		__destroy_snap_realm(mdsc, realm);
		spin_lock(&mdsc->snap_empty_lock);
	}
	spin_unlock(&mdsc->snap_empty_lock);
}

void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc)
{
	down_write(&mdsc->snap_rwsem);
	__cleanup_empty_realms(mdsc);
	up_write(&mdsc->snap_rwsem);
}

/*
 * adjust the parent realm of a given @realm.  adjust the child list,
 * parent pointers, and ref counts appropriately.
 *
 * return 1 if parent was changed, 0 if unchanged, <0 on error.
 *
 * caller must hold snap_rwsem for write.
 */
static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
				    struct ceph_snap_realm *realm,
				    u64 parentino)
{
	struct ceph_snap_realm *parent;

	if (realm->parent_ino == parentino)
		return 0;

	parent = ceph_lookup_snap_realm(mdsc, parentino);
	if (!parent) {
		parent = ceph_create_snap_realm(mdsc, parentino);
		if (IS_ERR(parent))
			return PTR_ERR(parent);
	}
	dout("adjust_snap_realm_parent %llx %p: %llx %p -> %llx %p\n",
	     realm->ino, realm, realm->parent_ino, realm->parent,
	     parentino, parent);
	if (realm->parent) {
		list_del_init(&realm->child_item);
		ceph_put_snap_realm(mdsc, realm->parent);
	}
	realm->parent_ino = parentino;
	realm->parent = parent;
	list_add(&realm->child_item, &parent->children);
	return 1;
}


/* compare in reverse: used by sort() to produce a descending snap vector */
static int cmpu64_rev(const void *a, const void *b)
{
	if (*(u64 *)a < *(u64 *)b)
		return 1;
	if (*(u64 *)a > *(u64 *)b)
		return -1;
	return 0;
}
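
/*
 * e.g. (hypothetical values) sort(snaps, 3, sizeof(u64), cmpu64_rev,
 * NULL) turns {1, 5, 4} into {5, 4, 1}; snap vectors are kept
 * newest-first throughout this file.
 */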


/*
 * build the snap context for a given realm.
 */
static int build_snap_context(struct ceph_snap_realm *realm,
			      struct list_head *dirty_realms)
{
	struct ceph_snap_realm *parent = realm->parent;
	struct ceph_snap_context *snapc;
	int err = 0;
	u32 num = realm->num_prior_parent_snaps + realm->num_snaps;

	/*
	 * build parent context, if it hasn't been built.
	 * conservatively estimate that all parent snaps might be
	 * included by us.
	 */
	if (parent) {
		if (!parent->cached_context) {
			err = build_snap_context(parent, dirty_realms);
			if (err)
				goto fail;
		}
		num += parent->cached_context->num_snaps;
	}

	/* do i actually need to update?  not if my context seq
	   matches realm seq, and my parent's does too.  (this works
	   because rebuild_snap_realms() works _downward_ in the
	   hierarchy after each update.) */
	if (realm->cached_context &&
	    realm->cached_context->seq == realm->seq &&
	    (!parent ||
	     realm->cached_context->seq >= parent->cached_context->seq)) {
		dout("build_snap_context %llx %p: %p seq %lld (%u snaps)"
		     " (unchanged)\n",
		     realm->ino, realm, realm->cached_context,
		     realm->cached_context->seq,
		     (unsigned int)realm->cached_context->num_snaps);
		return 0;
	}

	/* alloc new snap context */
	err = -ENOMEM;
	if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
		goto fail;
	snapc = ceph_create_snap_context(num, GFP_NOFS);
	if (!snapc)
		goto fail;

	/* build (reverse sorted) snap vector */
	num = 0;
	snapc->seq = realm->seq;
	if (parent) {
		u32 i;

		/* include any of parent's snaps occurring _after_ my
		   parent became my parent */
		for (i = 0; i < parent->cached_context->num_snaps; i++)
			if (parent->cached_context->snaps[i] >=
			    realm->parent_since)
				snapc->snaps[num++] =
					parent->cached_context->snaps[i];
		if (parent->cached_context->seq > snapc->seq)
			snapc->seq = parent->cached_context->seq;
	}
	memcpy(snapc->snaps + num, realm->snaps,
	       sizeof(u64)*realm->num_snaps);
	num += realm->num_snaps;
	memcpy(snapc->snaps + num, realm->prior_parent_snaps,
	       sizeof(u64)*realm->num_prior_parent_snaps);
	num += realm->num_prior_parent_snaps;

	sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL);
	snapc->num_snaps = num;
	dout("build_snap_context %llx %p: %p seq %lld (%u snaps)\n",
	     realm->ino, realm, snapc, snapc->seq,
	     (unsigned int) snapc->num_snaps);

	ceph_put_snap_context(realm->cached_context);
	realm->cached_context = snapc;
	/* queue realm for cap_snap creation */
	list_add_tail(&realm->dirty_item, dirty_realms);
	return 0;

fail:
	/*
	 * if we fail, clear old (incorrect) cached_context... hopefully
	 * we'll have better luck building it later
	 */
	if (realm->cached_context) {
		ceph_put_snap_context(realm->cached_context);
		realm->cached_context = NULL;
	}
	pr_err("build_snap_context %llx %p fail %d\n", realm->ino,
	       realm, err);
	return err;
}

/*
 * rebuild snap context for the given realm and all of its children.
 */
static void rebuild_snap_realms(struct ceph_snap_realm *realm,
				struct list_head *dirty_realms)
{
	struct ceph_snap_realm *child;

	dout("rebuild_snap_realms %llx %p\n", realm->ino, realm);
	build_snap_context(realm, dirty_realms);

	list_for_each_entry(child, &realm->children, child_item)
		rebuild_snap_realms(child, dirty_realms);
}


/*
 * helper to allocate and decode an array of snapids.  free prior
 * instance, if any.
 */
static int dup_array(u64 **dst, __le64 *src, u32 num)
{
	u32 i;

	kfree(*dst);
	if (num) {
		*dst = kcalloc(num, sizeof(u64), GFP_NOFS);
		if (!*dst)
			return -ENOMEM;
		for (i = 0; i < num; i++)
			(*dst)[i] = get_unaligned_le64(src + i);
	} else {
		*dst = NULL;
	}
	return 0;
}

static bool has_new_snaps(struct ceph_snap_context *o,
			  struct ceph_snap_context *n)
{
	if (n->num_snaps == 0)
		return false;
	/* snaps are in descending order */
	return n->snaps[0] > o->seq;
}
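
/*
 * e.g. (hypothetical): if the old context has seq 4 and the new
 * context's snaps are {6, 5}, then n->snaps[0] == 6 > 4 and a new
 * snapshot exists; with new snaps {4, 3} there is nothing new.
 */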

/*
 * When a snapshot is applied, the size/mtime inode metadata is queued
 * in a ceph_cap_snap (one for each snapshot) until writeback
 * completes and the metadata can be flushed back to the MDS.
 *
 * However, if a (sync) write is currently in-progress when we apply
 * the snapshot, we have to wait until the write succeeds or fails
 * (and a final size/mtime is known).  In this case cap_snap->writing
 * is 1 and the cap_snap is said to be "pending."  When the write
 * finishes, we __ceph_finish_cap_snap().
 *
 * Caller must hold snap_rwsem for read (i.e., the realm topology won't
 * change).
 */
void ceph_queue_cap_snap(struct ceph_inode_info *ci)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap_snap *capsnap;
	struct ceph_snap_context *old_snapc, *new_snapc;
	struct ceph_buffer *old_blob = NULL;
	int used, dirty;

	capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
	if (!capsnap) {
		pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
		return;
	}

	spin_lock(&ci->i_ceph_lock);
	used = __ceph_caps_used(ci);
	dirty = __ceph_caps_dirty(ci);

	old_snapc = ci->i_head_snapc;
	new_snapc = ci->i_snap_realm->cached_context;

	/*
	 * If there is a write in progress, treat that as a dirty Fw,
	 * even though it hasn't completed yet; by the time we finish
	 * up this capsnap it will be.
	 */
	if (used & CEPH_CAP_FILE_WR)
		dirty |= CEPH_CAP_FILE_WR;

	if (__ceph_have_pending_cap_snap(ci)) {
		/* there is no point in queuing multiple "pending" cap_snaps,
		   as no new writes are allowed to start when pending, so any
		   writes in progress now were started before the previous
		   cap_snap.  lucky us. */
		dout("queue_cap_snap %p already pending\n", inode);
		goto update_snapc;
	}
	if (ci->i_wrbuffer_ref_head == 0 &&
	    !(dirty & (CEPH_CAP_ANY_EXCL|CEPH_CAP_FILE_WR))) {
		dout("queue_cap_snap %p nothing dirty|writing\n", inode);
		goto update_snapc;
	}

	BUG_ON(!old_snapc);

	/*
	 * There is no need to send a FLUSHSNAP message to the MDS if
	 * there is no new snapshot.  But when there are dirty pages or
	 * on-going writes, we still need to create a cap_snap; it is
	 * needed by the write path and page writeback path.
	 *
	 * also see ceph_try_drop_cap_snap()
	 */
	if (has_new_snaps(old_snapc, new_snapc)) {
		if (dirty & (CEPH_CAP_ANY_EXCL|CEPH_CAP_FILE_WR))
			capsnap->need_flush = true;
	} else {
		if (!(used & CEPH_CAP_FILE_WR) &&
		    ci->i_wrbuffer_ref_head == 0) {
			dout("queue_cap_snap %p "
			     "no new_snap|dirty_page|writing\n", inode);
			goto update_snapc;
		}
	}

	dout("queue_cap_snap %p cap_snap %p queuing under %p %s %s\n",
	     inode, capsnap, old_snapc, ceph_cap_string(dirty),
	     capsnap->need_flush ? "" : "no_flush");
	ihold(inode);

	refcount_set(&capsnap->nref, 1);
	INIT_LIST_HEAD(&capsnap->ci_item);

	capsnap->follows = old_snapc->seq;
	capsnap->issued = __ceph_caps_issued(ci, NULL);
	capsnap->dirty = dirty;

	capsnap->mode = inode->i_mode;
	capsnap->uid = inode->i_uid;
	capsnap->gid = inode->i_gid;

	if (dirty & CEPH_CAP_XATTR_EXCL) {
		old_blob = __ceph_build_xattrs_blob(ci);
		capsnap->xattr_blob =
			ceph_buffer_get(ci->i_xattrs.blob);
		capsnap->xattr_version = ci->i_xattrs.version;
	} else {
		capsnap->xattr_blob = NULL;
		capsnap->xattr_version = 0;
	}

	capsnap->inline_data = ci->i_inline_version != CEPH_INLINE_NONE;

	/* dirty page count moved from _head to this cap_snap;
	   all subsequent page dirties occur _after_ this
	   snapshot. */
	capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
	ci->i_wrbuffer_ref_head = 0;
	capsnap->context = old_snapc;
	list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);

	if (used & CEPH_CAP_FILE_WR) {
		dout("queue_cap_snap %p cap_snap %p snapc %p"
		     " seq %llu used WR, now pending\n", inode,
		     capsnap, old_snapc, old_snapc->seq);
		capsnap->writing = 1;
	} else {
		/* note mtime, size NOW. */
		__ceph_finish_cap_snap(ci, capsnap);
	}
	capsnap = NULL;
	old_snapc = NULL;

update_snapc:
	if (ci->i_wrbuffer_ref_head == 0 &&
	    ci->i_wr_ref == 0 &&
	    ci->i_dirty_caps == 0 &&
	    ci->i_flushing_caps == 0) {
		ci->i_head_snapc = NULL;
	} else {
		ci->i_head_snapc = ceph_get_snap_context(new_snapc);
		dout(" new snapc is %p\n", new_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	ceph_buffer_put(old_blob);
	kfree(capsnap);
	ceph_put_snap_context(old_snapc);
}
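
/*
 * Lifecycle sketch (hypothetical scenario): a snapshot arrives while a
 * sync write holds Fw.  ceph_queue_cap_snap() queues the capsnap under
 * the old snap context with writing = 1 ("pending"); once the write
 * completes, __ceph_finish_cap_snap() below records the final
 * size/mtime and puts the inode on snap_flush_list so that
 * flush_snaps() can send the FLUSHSNAP to the MDS.
 */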

/*
 * Finalize the size and mtime for a cap_snap... that is, settle on final
 * values to be used for the snapshot, to be flushed back to the mds.
 *
 * If capsnap can now be flushed, add to snap_flush list, and return 1.
 *
 * Caller must hold i_ceph_lock.
 */
int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
			   struct ceph_cap_snap *capsnap)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);

	BUG_ON(capsnap->writing);
	capsnap->size = inode->i_size;
	capsnap->mtime = inode->i_mtime;
	capsnap->atime = inode->i_atime;
	capsnap->ctime = inode->i_ctime;
	capsnap->btime = ci->i_btime;
	capsnap->change_attr = inode_peek_iversion_raw(inode);
	capsnap->time_warp_seq = ci->i_time_warp_seq;
	capsnap->truncate_size = ci->i_truncate_size;
	capsnap->truncate_seq = ci->i_truncate_seq;
	if (capsnap->dirty_pages) {
		dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
		     "still has %d dirty pages\n", inode, capsnap,
		     capsnap->context, capsnap->context->seq,
		     ceph_cap_string(capsnap->dirty), capsnap->size,
		     capsnap->dirty_pages);
		return 0;
	}

	/* Fb cap still in use, delay it */
	if (ci->i_wb_ref) {
		dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
		     "used WRBUFFER, delaying\n", inode, capsnap,
		     capsnap->context, capsnap->context->seq,
		     ceph_cap_string(capsnap->dirty), capsnap->size);
		capsnap->writing = 1;
		return 0;
	}

	ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
	dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n",
	     inode, capsnap, capsnap->context,
	     capsnap->context->seq, ceph_cap_string(capsnap->dirty),
	     capsnap->size);

	spin_lock(&mdsc->snap_flush_lock);
	if (list_empty(&ci->i_snap_flush_item))
		list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
	spin_unlock(&mdsc->snap_flush_lock);
	return 1;  /* caller may want to ceph_flush_snaps */
}

/*
 * Queue cap_snaps for snap writeback for this realm and its children.
 * Called under snap_rwsem, so realm topology won't change.
 */
static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
{
	struct ceph_inode_info *ci;
	struct inode *lastinode = NULL;

	dout("queue_realm_cap_snaps %p %llx inodes\n", realm, realm->ino);

	spin_lock(&realm->inodes_with_caps_lock);
	list_for_each_entry(ci, &realm->inodes_with_caps, i_snap_realm_item) {
		struct inode *inode = igrab(&ci->vfs_inode);
		if (!inode)
			continue;
		spin_unlock(&realm->inodes_with_caps_lock);
		/* avoid calling iput_final() while holding
		 * mdsc->snap_rwsem or in mds dispatch threads */
		ceph_async_iput(lastinode);
		lastinode = inode;
		ceph_queue_cap_snap(ci);
		spin_lock(&realm->inodes_with_caps_lock);
	}
	spin_unlock(&realm->inodes_with_caps_lock);
	ceph_async_iput(lastinode);

	dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
}

/*
 * Parse and apply a snapblob "snap trace" from the MDS.  This specifies
 * the snap realm parameters from a given realm and all of its ancestors,
 * up to the root.
 *
 * Caller must hold snap_rwsem for write.
 */
int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
			   void *p, void *e, bool deletion,
			   struct ceph_snap_realm **realm_ret)
{
	struct ceph_mds_snap_realm *ri;    /* encoded */
	__le64 *snaps;                     /* encoded */
	__le64 *prior_parent_snaps;        /* encoded */
	struct ceph_snap_realm *realm = NULL;
	struct ceph_snap_realm *first_realm = NULL;
	int invalidate = 0;
	int err = -ENOMEM;
	LIST_HEAD(dirty_realms);

	dout("update_snap_trace deletion=%d\n", deletion);
more:
	ceph_decode_need(&p, e, sizeof(*ri), bad);
	ri = p;
	p += sizeof(*ri);
	ceph_decode_need(&p, e, sizeof(u64)*(le32_to_cpu(ri->num_snaps) +
			    le32_to_cpu(ri->num_prior_parent_snaps)), bad);
	snaps = p;
	p += sizeof(u64) * le32_to_cpu(ri->num_snaps);
	prior_parent_snaps = p;
	p += sizeof(u64) * le32_to_cpu(ri->num_prior_parent_snaps);

	realm = ceph_lookup_snap_realm(mdsc, le64_to_cpu(ri->ino));
	if (!realm) {
		realm = ceph_create_snap_realm(mdsc, le64_to_cpu(ri->ino));
		if (IS_ERR(realm)) {
			err = PTR_ERR(realm);
			goto fail;
		}
	}

	/* ensure the parent is correct */
	err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
	if (err < 0)
		goto fail;
	invalidate += err;

	if (le64_to_cpu(ri->seq) > realm->seq) {
		dout("update_snap_trace updating %llx %p %lld -> %lld\n",
		     realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
		/* update realm parameters, snap lists */
		realm->seq = le64_to_cpu(ri->seq);
		realm->created = le64_to_cpu(ri->created);
		realm->parent_since = le64_to_cpu(ri->parent_since);

		realm->num_snaps = le32_to_cpu(ri->num_snaps);
		err = dup_array(&realm->snaps, snaps, realm->num_snaps);
		if (err < 0)
			goto fail;

		realm->num_prior_parent_snaps =
			le32_to_cpu(ri->num_prior_parent_snaps);
		err = dup_array(&realm->prior_parent_snaps, prior_parent_snaps,
				realm->num_prior_parent_snaps);
		if (err < 0)
			goto fail;

		if (realm->seq > mdsc->last_snap_seq)
			mdsc->last_snap_seq = realm->seq;

		invalidate = 1;
	} else if (!realm->cached_context) {
		dout("update_snap_trace %llx %p seq %lld new\n",
		     realm->ino, realm, realm->seq);
		invalidate = 1;
	} else {
		dout("update_snap_trace %llx %p seq %lld unchanged\n",
		     realm->ino, realm, realm->seq);
	}

	dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
	     realm, invalidate, p, e);

	/* invalidate when we reach the _end_ (root) of the trace */
	if (invalidate && p >= e)
		rebuild_snap_realms(realm, &dirty_realms);

	if (!first_realm)
		first_realm = realm;
	else
		ceph_put_snap_realm(mdsc, realm);

	if (p < e)
		goto more;

	/*
	 * queue cap snaps _after_ we've built the new snap contexts,
	 * so that i_head_snapc can be set appropriately.
	 */
	while (!list_empty(&dirty_realms)) {
		realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
					 dirty_item);
		list_del_init(&realm->dirty_item);
		queue_realm_cap_snaps(realm);
	}

	if (realm_ret)
		*realm_ret = first_realm;
	else
		ceph_put_snap_realm(mdsc, first_realm);

	__cleanup_empty_realms(mdsc);
	return 0;

bad:
	err = -EINVAL;
fail:
	if (realm && !IS_ERR(realm))
		ceph_put_snap_realm(mdsc, realm);
	if (first_realm)
		ceph_put_snap_realm(mdsc, first_realm);
	pr_err("update_snap_trace error %d\n", err);
	return err;
}
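
/*
 * A snap trace, as decoded above, is a sequence of
 * (ceph_mds_snap_realm header, snaps[], prior_parent_snaps[]) records
 * running from the named realm up to the root; snap contexts are only
 * rebuilt once the final (root) record has been applied, so each
 * realm's context is recomputed at most once per trace.
 */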


/*
 * Send any cap_snaps that are queued for flush.  Try to carry
 * s_mutex across multiple snap flushes to avoid locking overhead.
 *
 * Caller holds no locks.
 */
static void flush_snaps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	struct inode *inode;
	struct ceph_mds_session *session = NULL;

	dout("flush_snaps\n");
	spin_lock(&mdsc->snap_flush_lock);
	while (!list_empty(&mdsc->snap_flush_list)) {
		ci = list_first_entry(&mdsc->snap_flush_list,
				      struct ceph_inode_info, i_snap_flush_item);
		inode = &ci->vfs_inode;
		ihold(inode);
		spin_unlock(&mdsc->snap_flush_lock);
		ceph_flush_snaps(ci, &session);
		/* avoid calling iput_final() while holding
		 * session->s_mutex or in mds dispatch threads */
		ceph_async_iput(inode);
		spin_lock(&mdsc->snap_flush_lock);
	}
	spin_unlock(&mdsc->snap_flush_lock);

	if (session) {
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
	}
	dout("flush_snaps done\n");
}


/*
 * Handle a snap notification from the MDS.
 *
 * This can take two basic forms: the simplest is just a snap creation
 * or deletion notification on an existing realm.  This should update the
 * realm and its children.
 *
 * The more difficult case is realm creation, due to snap creation at a
 * new point in the file hierarchy, or due to a rename that moves a file or
 * directory into another realm.
 */
void ceph_handle_snap(struct ceph_mds_client *mdsc,
		      struct ceph_mds_session *session,
		      struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->fsc->sb;
	int mds = session->s_mds;
	u64 split;
	int op;
	int trace_len;
	struct ceph_snap_realm *realm = NULL;
	void *p = msg->front.iov_base;
	void *e = p + msg->front.iov_len;
	struct ceph_mds_snap_head *h;
	int num_split_inos, num_split_realms;
	__le64 *split_inos = NULL, *split_realms = NULL;
	int i;
	int locked_rwsem = 0;

	/* decode */
	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	h = p;
	op = le32_to_cpu(h->op);
	split = le64_to_cpu(h->split);     /* non-zero if we are splitting an
					    * existing realm */
	num_split_inos = le32_to_cpu(h->num_split_inos);
	num_split_realms = le32_to_cpu(h->num_split_realms);
	trace_len = le32_to_cpu(h->trace_len);
	p += sizeof(*h);

	dout("handle_snap from mds%d op %s split %llx tracelen %d\n", mds,
	     ceph_snap_op_name(op), split, trace_len);

	mutex_lock(&session->s_mutex);
	inc_session_sequence(session);
	mutex_unlock(&session->s_mutex);

	down_write(&mdsc->snap_rwsem);
	locked_rwsem = 1;

	if (op == CEPH_SNAP_OP_SPLIT) {
		struct ceph_mds_snap_realm *ri;

		/*
		 * A "split" breaks part of an existing realm off into
		 * a new realm.  The MDS provides a list of inodes
		 * (with caps) and child realms that belong to the new
		 * child.
		 */
		split_inos = p;
		p += sizeof(u64) * num_split_inos;
		split_realms = p;
		p += sizeof(u64) * num_split_realms;
		ceph_decode_need(&p, e, sizeof(*ri), bad);
		/* we will peek at realm info here, but will _not_
		 * advance p, as the realm update will occur below in
		 * ceph_update_snap_trace. */
		ri = p;

		realm = ceph_lookup_snap_realm(mdsc, split);
		if (!realm) {
			realm = ceph_create_snap_realm(mdsc, split);
			if (IS_ERR(realm))
				goto out;
		}

		dout("splitting snap_realm %llx %p\n", realm->ino, realm);
		for (i = 0; i < num_split_inos; i++) {
			struct ceph_vino vino = {
				.ino = le64_to_cpu(split_inos[i]),
				.snap = CEPH_NOSNAP,
			};
			struct inode *inode = ceph_find_inode(sb, vino);
			struct ceph_inode_info *ci;
			struct ceph_snap_realm *oldrealm;

			if (!inode)
				continue;
			ci = ceph_inode(inode);

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_snap_realm)
				goto skip_inode;
			/*
			 * If this inode belongs to a realm that was
			 * created after our new realm, we experienced
			 * a race (due to another split notification
			 * arriving from a different MDS).  So skip
			 * this inode.
			 */
			if (ci->i_snap_realm->created >
			    le64_to_cpu(ri->created)) {
				dout(" leaving %p in newer realm %llx %p\n",
				     inode, ci->i_snap_realm->ino,
				     ci->i_snap_realm);
				goto skip_inode;
			}
			dout(" will move %p to split realm %llx %p\n",
			     inode, realm->ino, realm);
			/*
			 * Move the inode to the new realm
			 */
			oldrealm = ci->i_snap_realm;
			spin_lock(&oldrealm->inodes_with_caps_lock);
			list_del_init(&ci->i_snap_realm_item);
			spin_unlock(&oldrealm->inodes_with_caps_lock);

			spin_lock(&realm->inodes_with_caps_lock);
			list_add(&ci->i_snap_realm_item,
				 &realm->inodes_with_caps);
			ci->i_snap_realm = realm;
			if (realm->ino == ci->i_vino.ino)
				realm->inode = inode;
			spin_unlock(&realm->inodes_with_caps_lock);

			spin_unlock(&ci->i_ceph_lock);

			ceph_get_snap_realm(mdsc, realm);
			ceph_put_snap_realm(mdsc, oldrealm);

			/* avoid calling iput_final() while holding
			 * mdsc->snap_rwsem or in mds dispatch threads */
			ceph_async_iput(inode);
			continue;

skip_inode:
			spin_unlock(&ci->i_ceph_lock);
			ceph_async_iput(inode);
		}

		/* we may have taken some of the old realm's children. */
		for (i = 0; i < num_split_realms; i++) {
			struct ceph_snap_realm *child =
				__lookup_snap_realm(mdsc,
					   le64_to_cpu(split_realms[i]));
			if (!child)
				continue;
			adjust_snap_realm_parent(mdsc, child, realm->ino);
		}
	}

	/*
	 * update using the provided snap trace.  if we are deleting a
	 * snap, we can avoid queueing cap_snaps.
	 */
	ceph_update_snap_trace(mdsc, p, e,
			       op == CEPH_SNAP_OP_DESTROY, NULL);

	if (op == CEPH_SNAP_OP_SPLIT)
		/* we took a reference when we created the realm, above */
		ceph_put_snap_realm(mdsc, realm);

	__cleanup_empty_realms(mdsc);

	up_write(&mdsc->snap_rwsem);

	flush_snaps(mdsc);
	return;

bad:
	pr_err("corrupt snap message from mds%d\n", mds);
	ceph_msg_dump(msg);
out:
	if (locked_rwsem)
		up_write(&mdsc->snap_rwsem);
	return;
}

struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
					    u64 snap)
{
	struct ceph_snapid_map *sm, *exist;
	struct rb_node **p, *parent;
	int ret;

	exist = NULL;
	spin_lock(&mdsc->snapid_map_lock);
	/* snapid_map_tree is kept in descending snap id order */
	p = &mdsc->snapid_map_tree.rb_node;
	while (*p) {
		exist = rb_entry(*p, struct ceph_snapid_map, node);
		if (snap > exist->snap) {
			p = &(*p)->rb_left;
		} else if (snap < exist->snap) {
			p = &(*p)->rb_right;
		} else {
			if (atomic_inc_return(&exist->ref) == 1)
				list_del_init(&exist->lru);
			break;
		}
		exist = NULL;
	}
	spin_unlock(&mdsc->snapid_map_lock);
	if (exist) {
		dout("found snapid map %llx -> %x\n", exist->snap, exist->dev);
		return exist;
	}

	sm = kmalloc(sizeof(*sm), GFP_NOFS);
	if (!sm)
		return NULL;

	ret = get_anon_bdev(&sm->dev);
	if (ret < 0) {
		kfree(sm);
		return NULL;
	}

	INIT_LIST_HEAD(&sm->lru);
	atomic_set(&sm->ref, 1);
	sm->snap = snap;

	exist = NULL;
	parent = NULL;
	p = &mdsc->snapid_map_tree.rb_node;
	spin_lock(&mdsc->snapid_map_lock);
	while (*p) {
		parent = *p;
		exist = rb_entry(*p, struct ceph_snapid_map, node);
		if (snap > exist->snap)
			p = &(*p)->rb_left;
		else if (snap < exist->snap)
			p = &(*p)->rb_right;
		else
			break;
		exist = NULL;
	}
	if (exist) {
		if (atomic_inc_return(&exist->ref) == 1)
			list_del_init(&exist->lru);
	} else {
		rb_link_node(&sm->node, parent, p);
		rb_insert_color(&sm->node, &mdsc->snapid_map_tree);
	}
	spin_unlock(&mdsc->snapid_map_lock);
	if (exist) {
		free_anon_bdev(sm->dev);
		kfree(sm);
		dout("found snapid map %llx -> %x\n", exist->snap, exist->dev);
		return exist;
	}

	dout("create snapid map %llx -> %x\n", sm->snap, sm->dev);
	return sm;
}
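
/*
 * e.g. (hypothetical ids): the first ceph_get_snapid_map(mdsc, 0x12)
 * allocates an anonymous bdev -- say 0:64 -- and inserts the mapping;
 * later lookups of snap 0x12 just take a reference, and the entry is
 * only freed once it has sat unused on the LRU for longer than
 * CEPH_SNAPID_MAP_TIMEOUT.
 */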

void ceph_put_snapid_map(struct ceph_mds_client* mdsc,
			 struct ceph_snapid_map *sm)
{
	if (!sm)
		return;
	if (atomic_dec_and_lock(&sm->ref, &mdsc->snapid_map_lock)) {
		if (!RB_EMPTY_NODE(&sm->node)) {
			sm->last_used = jiffies;
			list_add_tail(&sm->lru, &mdsc->snapid_map_lru);
			spin_unlock(&mdsc->snapid_map_lock);
		} else {
			/* already cleaned up by
			 * ceph_cleanup_snapid_map() */
			spin_unlock(&mdsc->snapid_map_lock);
			kfree(sm);
		}
	}
}

void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
{
	struct ceph_snapid_map *sm;
	unsigned long now;
	LIST_HEAD(to_free);

	spin_lock(&mdsc->snapid_map_lock);
	now = jiffies;

	while (!list_empty(&mdsc->snapid_map_lru)) {
		sm = list_first_entry(&mdsc->snapid_map_lru,
				      struct ceph_snapid_map, lru);
		if (time_after(sm->last_used + CEPH_SNAPID_MAP_TIMEOUT, now))
			break;

		rb_erase(&sm->node, &mdsc->snapid_map_tree);
		list_move(&sm->lru, &to_free);
	}
	spin_unlock(&mdsc->snapid_map_lock);

	while (!list_empty(&to_free)) {
		sm = list_first_entry(&to_free, struct ceph_snapid_map, lru);
		list_del(&sm->lru);
		dout("trim snapid map %llx -> %x\n", sm->snap, sm->dev);
		free_anon_bdev(sm->dev);
		kfree(sm);
	}
}

void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
{
	struct ceph_snapid_map *sm;
	struct rb_node *p;
	LIST_HEAD(to_free);

	spin_lock(&mdsc->snapid_map_lock);
	while ((p = rb_first(&mdsc->snapid_map_tree))) {
		sm = rb_entry(p, struct ceph_snapid_map, node);
		rb_erase(p, &mdsc->snapid_map_tree);
		RB_CLEAR_NODE(p);
		list_move(&sm->lru, &to_free);
	}
	spin_unlock(&mdsc->snapid_map_lock);

	while (!list_empty(&to_free)) {
		sm = list_first_entry(&to_free, struct ceph_snapid_map, lru);
		list_del(&sm->lru);
		free_anon_bdev(sm->dev);
		if (WARN_ON_ONCE(atomic_read(&sm->ref))) {
			pr_err("snapid map %llx -> %x still in use\n",
			       sm->snap, sm->dev);
		}
		kfree(sm);
	}
}