ceph: pre-allocate ceph_cap struct for ceph_add_cap()
So that ceph_add_cap() can be used while i_ceph_lock is held. This
simplifies the code that handles cap import/export.
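The pattern is: allocate the struct before taking the lock, pass it in
by reference so the locked helper can consume it (clearing the caller's
pointer), and release whatever is left over after unlocking; in the
patch that happens once at the out: label via ceph_put_cap(). For
illustration only, a minimal userspace sketch of that pattern
(hypothetical names; a pthread mutex stands in for i_ceph_lock, and
malloc/free for the mdsc cap reservation):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cap {
	int issued;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct cap *installed_cap;	/* protected by 'lock' */

/*
 * Called with 'lock' held, so it must not allocate. Consumes *new_cap
 * (by clearing the caller's pointer) only when it is actually needed.
 */
static void add_cap_locked(int issued, struct cap **new_cap)
{
	if (!installed_cap) {
		installed_cap = *new_cap;
		*new_cap = NULL;	/* consumed */
	}
	installed_cap->issued |= issued;
}

static int fill_inode(int issued)
{
	/* pre-allocate before taking the lock */
	struct cap *new_cap = malloc(sizeof(*new_cap));

	if (!new_cap)
		return -1;
	new_cap->issued = 0;

	pthread_mutex_lock(&lock);
	add_cap_locked(issued, &new_cap);
	pthread_mutex_unlock(&lock);

	free(new_cap);	/* no-op if the cap was consumed */
	return 0;
}

int main(void)
{
	fill_inode(1);
	fill_inode(2);	/* second call leaves its prealloc unconsumed */
	printf("issued mask: %d\n", installed_cap->issued);
	return 0;
}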
Signed-off-by: Yan, Zheng <zheng.z.yan@intel.com>
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index f9e7399..8ad50a3 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -341,7 +341,6 @@
INIT_LIST_HEAD(&ci->i_cap_snaps);
ci->i_head_snapc = NULL;
ci->i_snap_caps = 0;
- ci->i_cap_exporting_issued = 0;

for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
ci->i_nr_by_mode[i] = 0;
@@ -407,7 +406,7 @@

/*
* we may still have a snap_realm reference if there are stray
- * caps in i_cap_exporting_issued or i_snap_caps.
+ * caps in i_snap_caps.
*/
if (ci->i_snap_realm) {
struct ceph_mds_client *mdsc =
@@ -582,6 +581,7 @@
unsigned long ttl_from, int cap_fmode,
struct ceph_cap_reservation *caps_reservation)
{
+ struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
struct ceph_mds_reply_inode *info = iinfo->in;
struct ceph_inode_info *ci = ceph_inode(inode);
int i;
@@ -591,7 +591,9 @@
struct ceph_inode_frag *frag;
struct rb_node *rb_node;
struct ceph_buffer *xattr_blob = NULL;
+ struct ceph_cap *new_cap = NULL;
int err = 0;
+ bool wake = false;
bool queue_trunc = false;
bool new_version = false;

@@ -599,6 +601,10 @@
inode, ceph_vinop(inode), le64_to_cpu(info->version),
ci->i_version);

+ /* prealloc new cap struct */
+ if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
+ new_cap = ceph_get_cap(mdsc, caps_reservation);
+
/*
* prealloc xattr data, if it looks like we'll need it. only
* if len > 4 (meaning there are actually xattrs; the first 4
@@ -762,8 +768,37 @@
dout(" marking %p complete (empty)\n", inode);
__ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
}
+
+ /* were we issued a capability? */
+ if (info->cap.caps) {
+ if (ceph_snap(inode) == CEPH_NOSNAP) {
+ ceph_add_cap(inode, session,
+ le64_to_cpu(info->cap.cap_id),
+ cap_fmode,
+ le32_to_cpu(info->cap.caps),
+ le32_to_cpu(info->cap.wanted),
+ le32_to_cpu(info->cap.seq),
+ le32_to_cpu(info->cap.mseq),
+ le64_to_cpu(info->cap.realm),
+ info->cap.flags, &new_cap);
+ wake = true;
+ } else {
+ dout(" %p got snap_caps %s\n", inode,
+ ceph_cap_string(le32_to_cpu(info->cap.caps)));
+ ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
+ if (cap_fmode >= 0)
+ __ceph_get_fmode(ci, cap_fmode);
+ }
+ } else if (cap_fmode >= 0) {
+ pr_warning("mds issued no caps on %llx.%llx\n",
+ ceph_vinop(inode));
+ __ceph_get_fmode(ci, cap_fmode);
+ }
spin_unlock(&ci->i_ceph_lock);

+ if (wake)
+ wake_up_all(&ci->i_cap_wq);
+
/* queue truncate if we saw i_size decrease */
if (queue_trunc)
ceph_queue_vmtruncate(inode);
@@ -806,41 +841,14 @@
}
mutex_unlock(&ci->i_fragtree_mutex);

- /* were we issued a capability? */
- if (info->cap.caps) {
- if (ceph_snap(inode) == CEPH_NOSNAP) {
- ceph_add_cap(inode, session,
- le64_to_cpu(info->cap.cap_id),
- cap_fmode,
- le32_to_cpu(info->cap.caps),
- le32_to_cpu(info->cap.wanted),
- le32_to_cpu(info->cap.seq),
- le32_to_cpu(info->cap.mseq),
- le64_to_cpu(info->cap.realm),
- info->cap.flags,
- caps_reservation);
- } else {
- spin_lock(&ci->i_ceph_lock);
- dout(" %p got snap_caps %s\n", inode,
- ceph_cap_string(le32_to_cpu(info->cap.caps)));
- ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
- if (cap_fmode >= 0)
- __ceph_get_fmode(ci, cap_fmode);
- spin_unlock(&ci->i_ceph_lock);
- }
- } else if (cap_fmode >= 0) {
- pr_warning("mds issued no caps on %llx.%llx\n",
- ceph_vinop(inode));
- __ceph_get_fmode(ci, cap_fmode);
- }
-
/* update delegation info? */
if (dirinfo)
ceph_fill_dirfrag(inode, dirinfo);

err = 0;
-
out:
+ if (new_cap)
+ ceph_put_cap(mdsc, new_cap);
if (xattr_blob)
ceph_buffer_put(xattr_blob);
return err;