/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * xattr.c
 *
 * Copyright (C) 2004, 2008 Oracle.  All rights reserved.
 *
 * CREDITS:
 * Lots of code in this file is copied from linux/fs/ext3/xattr.c.
 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/sort.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/security.h>

#include <cluster/masklog.h>

#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "file.h"
#include "symlink.h"
#include "sysfile.h"
#include "inode.h"
#include "journal.h"
#include "ocfs2_fs.h"
#include "suballoc.h"
#include "uptodate.h"
#include "buffer_head_io.h"
#include "super.h"
#include "xattr.h"
#include "refcounttree.h"
#include "acl.h"
#include "ocfs2_trace.h"

struct ocfs2_xattr_def_value_root {
	struct ocfs2_xattr_value_root	xv;
	struct ocfs2_extent_rec		er;
};

struct ocfs2_xattr_bucket {
	/* The inode these xattrs are associated with */
	struct inode *bu_inode;

	/* The actual buffers that make up the bucket */
	struct buffer_head *bu_bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET];

	/* How many blocks make up one bucket for this filesystem */
	int bu_blocks;
};

struct ocfs2_xattr_set_ctxt {
	handle_t *handle;
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_alloc_context *data_ac;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	int set_abort;
};

#define OCFS2_XATTR_ROOT_SIZE	(sizeof(struct ocfs2_xattr_def_value_root))
#define OCFS2_XATTR_INLINE_SIZE	80
#define OCFS2_XATTR_HEADER_GAP	4
#define OCFS2_XATTR_FREE_IN_IBODY	(OCFS2_MIN_XATTR_INLINE_SIZE \
					 - sizeof(struct ocfs2_xattr_header) \
					 - OCFS2_XATTR_HEADER_GAP)
#define OCFS2_XATTR_FREE_IN_BLOCK(ptr)	((ptr)->i_sb->s_blocksize \
					 - sizeof(struct ocfs2_xattr_block) \
					 - sizeof(struct ocfs2_xattr_header) \
					 - OCFS2_XATTR_HEADER_GAP)

static struct ocfs2_xattr_def_value_root def_xv = {
	.xv.xr_list.l_count = cpu_to_le16(1),
};

const struct xattr_handler *ocfs2_xattr_handlers[] = {
	&ocfs2_xattr_user_handler,
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
	&ocfs2_xattr_trusted_handler,
	&ocfs2_xattr_security_handler,
	NULL
};

static const struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
	[OCFS2_XATTR_INDEX_USER]	= &ocfs2_xattr_user_handler,
	[OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS]
					= &posix_acl_access_xattr_handler,
	[OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT]
					= &posix_acl_default_xattr_handler,
	[OCFS2_XATTR_INDEX_TRUSTED]	= &ocfs2_xattr_trusted_handler,
	[OCFS2_XATTR_INDEX_SECURITY]	= &ocfs2_xattr_security_handler,
};

struct ocfs2_xattr_info {
	int		xi_name_index;
	const char	*xi_name;
	int		xi_name_len;
	const void	*xi_value;
	size_t		xi_value_len;
};

struct ocfs2_xattr_search {
	struct buffer_head *inode_bh;
	/*
	 * xattr_bh points to the block buffer head which holds the extended
	 * attributes.  When the extended attributes live in the inode,
	 * xattr_bh is equal to inode_bh.
	 */
	struct buffer_head *xattr_bh;
	struct ocfs2_xattr_header *header;
	struct ocfs2_xattr_bucket *bucket;
	void *base;
	void *end;
	struct ocfs2_xattr_entry *here;
	int not_found;
};

/* Operations on struct ocfs2_xa_entry */
struct ocfs2_xa_loc;
struct ocfs2_xa_loc_operations {
	/*
	 * Journal functions
	 */
	int (*xlo_journal_access)(handle_t *handle, struct ocfs2_xa_loc *loc,
				  int type);
	void (*xlo_journal_dirty)(handle_t *handle, struct ocfs2_xa_loc *loc);

	/*
	 * Return a pointer to the appropriate buffer in loc->xl_storage
	 * at the given offset from loc->xl_header.
	 */
	void *(*xlo_offset_pointer)(struct ocfs2_xa_loc *loc, int offset);

	/* Can we reuse the existing entry for the new value? */
	int (*xlo_can_reuse)(struct ocfs2_xa_loc *loc,
			     struct ocfs2_xattr_info *xi);

	/* How much space is needed for the new value? */
	int (*xlo_check_space)(struct ocfs2_xa_loc *loc,
			       struct ocfs2_xattr_info *xi);

	/*
	 * Return the offset of the first name+value pair.  This is
	 * the start of our downward-filling free space.
	 */
	int (*xlo_get_free_start)(struct ocfs2_xa_loc *loc);

	/*
	 * Remove the name+value at this location.  Do whatever is
	 * appropriate with the remaining name+value pairs.
	 */
	void (*xlo_wipe_namevalue)(struct ocfs2_xa_loc *loc);

	/* Fill xl_entry with a new entry */
	void (*xlo_add_entry)(struct ocfs2_xa_loc *loc, u32 name_hash);

	/* Add name+value storage to an entry */
	void (*xlo_add_namevalue)(struct ocfs2_xa_loc *loc, int size);

	/*
	 * Initialize the value buf's access and bh fields for this entry.
	 * ocfs2_xa_fill_value_buf() will handle the xv pointer.
	 */
	void (*xlo_fill_value_buf)(struct ocfs2_xa_loc *loc,
				   struct ocfs2_xattr_value_buf *vb);
};

/*
 * Describes an xattr entry location.  This is a memory structure
 * tracking the on-disk structure.
 */
struct ocfs2_xa_loc {
	/* This xattr belongs to this inode */
	struct inode *xl_inode;

	/* The ocfs2_xattr_header inside the on-disk storage. Not NULL. */
	struct ocfs2_xattr_header *xl_header;

	/* Bytes from xl_header to the end of the storage */
	int xl_size;

	/*
	 * The ocfs2_xattr_entry this location describes.  If this is
	 * NULL, this location describes the on-disk structure where it
	 * would have been.
	 */
	struct ocfs2_xattr_entry *xl_entry;

	/*
	 * Internal housekeeping
	 */

	/* Buffer(s) containing this entry */
	void *xl_storage;

	/* Operations on the storage backing this location */
	const struct ocfs2_xa_loc_operations *xl_ops;
};

/*
 * Convenience functions to calculate how much space is needed for a
 * given name+value pair
 */
static int namevalue_size(int name_len, uint64_t value_len)
{
	if (value_len > OCFS2_XATTR_INLINE_SIZE)
		return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
	else
		return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
}

static int namevalue_size_xi(struct ocfs2_xattr_info *xi)
{
	return namevalue_size(xi->xi_name_len, xi->xi_value_len);
}

static int namevalue_size_xe(struct ocfs2_xattr_entry *xe)
{
	u64 value_len = le64_to_cpu(xe->xe_value_size);

	BUG_ON((value_len > OCFS2_XATTR_INLINE_SIZE) &&
	       ocfs2_xattr_is_local(xe));
	return namevalue_size(xe->xe_name_len, value_len);
}


static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
					     struct ocfs2_xattr_header *xh,
					     int index,
					     int *block_off,
					     int *new_offset);

static int ocfs2_xattr_block_find(struct inode *inode,
				  int name_index,
				  const char *name,
				  struct ocfs2_xattr_search *xs);
static int ocfs2_xattr_index_block_find(struct inode *inode,
					struct buffer_head *root_bh,
					int name_index,
					const char *name,
					struct ocfs2_xattr_search *xs);

static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
					     struct buffer_head *blk_bh,
					     char *buffer,
					     size_t buffer_size);

static int ocfs2_xattr_create_index_block(struct inode *inode,
					  struct ocfs2_xattr_search *xs,
					  struct ocfs2_xattr_set_ctxt *ctxt);

static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
					     struct ocfs2_xattr_info *xi,
					     struct ocfs2_xattr_search *xs,
					     struct ocfs2_xattr_set_ctxt *ctxt);

typedef int (xattr_tree_rec_func)(struct inode *inode,
				  struct buffer_head *root_bh,
				  u64 blkno, u32 cpos, u32 len, void *para);
static int ocfs2_iterate_xattr_index_block(struct inode *inode,
					   struct buffer_head *root_bh,
					   xattr_tree_rec_func *rec_func,
					   void *para);
static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
					struct ocfs2_xattr_bucket *bucket,
					void *para);
static int ocfs2_rm_xattr_cluster(struct inode *inode,
				  struct buffer_head *root_bh,
				  u64 blkno,
				  u32 cpos,
				  u32 len,
				  void *para);

static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
				  u64 src_blk, u64 last_blk, u64 to_blk,
				  unsigned int start_bucket,
				  u32 *first_hash);
static int ocfs2_prepare_refcount_xattr(struct inode *inode,
					struct ocfs2_dinode *di,
					struct ocfs2_xattr_info *xi,
					struct ocfs2_xattr_search *xis,
					struct ocfs2_xattr_search *xbs,
					struct ocfs2_refcount_tree **ref_tree,
					int *meta_need,
					int *credits);
static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
					   struct ocfs2_xattr_bucket *bucket,
					   int offset,
					   struct ocfs2_xattr_value_root **xv,
					   struct buffer_head **bh);

static inline u16 ocfs2_xattr_buckets_per_cluster(struct ocfs2_super *osb)
{
	return (1 << osb->s_clustersize_bits) / OCFS2_XATTR_BUCKET_SIZE;
}

static inline u16 ocfs2_blocks_per_xattr_bucket(struct super_block *sb)
{
	return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits);
}

#define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr)
#define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data)
#define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0))

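/*
 * An xattr bucket is a fixed-size region of OCFS2_XATTR_BUCKET_SIZE bytes
 * built from one or more filesystem blocks, so bu_blocks depends on the
 * block size.  The ocfs2_xattr_header always sits at the start of the
 * bucket's first block, which is why bucket_xh() simply casts block 0.
 * The helpers below allocate, read, journal and copy whole buckets.
 */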
static struct ocfs2_xattr_bucket *ocfs2_xattr_bucket_new(struct inode *inode)
{
	struct ocfs2_xattr_bucket *bucket;
	int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);

	BUG_ON(blks > OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET);

	bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS);
	if (bucket) {
		bucket->bu_inode = inode;
		bucket->bu_blocks = blks;
	}

	return bucket;
}

static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket)
{
	int i;

	for (i = 0; i < bucket->bu_blocks; i++) {
		brelse(bucket->bu_bhs[i]);
		bucket->bu_bhs[i] = NULL;
	}
}

static void ocfs2_xattr_bucket_free(struct ocfs2_xattr_bucket *bucket)
{
	if (bucket) {
		ocfs2_xattr_bucket_relse(bucket);
		bucket->bu_inode = NULL;
		kfree(bucket);
	}
}

/*
 * A bucket that has never been written to disk doesn't need to be
 * read.  We just need the buffer_heads.  Don't call this for
 * buckets that are already on disk.  ocfs2_read_xattr_bucket() initializes
 * them fully.
 */
static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
				   u64 xb_blkno, int new)
{
	int i, rc = 0;

	for (i = 0; i < bucket->bu_blocks; i++) {
		bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb,
					      xb_blkno + i);
		if (!bucket->bu_bhs[i]) {
			rc = -ENOMEM;
			mlog_errno(rc);
			break;
		}

		if (!ocfs2_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
					   bucket->bu_bhs[i])) {
			if (new)
				ocfs2_set_new_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
							      bucket->bu_bhs[i]);
			else {
				set_buffer_uptodate(bucket->bu_bhs[i]);
				ocfs2_set_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
							  bucket->bu_bhs[i]);
			}
		}
	}

	if (rc)
		ocfs2_xattr_bucket_relse(bucket);
	return rc;
}

/* Read the xattr bucket at xb_blkno */
static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
				   u64 xb_blkno)
{
	int rc;

	rc = ocfs2_read_blocks(INODE_CACHE(bucket->bu_inode), xb_blkno,
			       bucket->bu_blocks, bucket->bu_bhs, 0,
			       NULL);
	if (!rc) {
		spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
		rc = ocfs2_validate_meta_ecc_bhs(bucket->bu_inode->i_sb,
						 bucket->bu_bhs,
						 bucket->bu_blocks,
						 &bucket_xh(bucket)->xh_check);
		spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
		if (rc)
			mlog_errno(rc);
	}

	if (rc)
		ocfs2_xattr_bucket_relse(bucket);
	return rc;
}

static int ocfs2_xattr_bucket_journal_access(handle_t *handle,
					     struct ocfs2_xattr_bucket *bucket,
					     int type)
{
	int i, rc = 0;

	for (i = 0; i < bucket->bu_blocks; i++) {
		rc = ocfs2_journal_access(handle,
					  INODE_CACHE(bucket->bu_inode),
					  bucket->bu_bhs[i], type);
		if (rc) {
			mlog_errno(rc);
			break;
		}
	}

	return rc;
}

static void ocfs2_xattr_bucket_journal_dirty(handle_t *handle,
					     struct ocfs2_xattr_bucket *bucket)
{
	int i;

	spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
	ocfs2_compute_meta_ecc_bhs(bucket->bu_inode->i_sb,
				   bucket->bu_bhs, bucket->bu_blocks,
				   &bucket_xh(bucket)->xh_check);
	spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);

	for (i = 0; i < bucket->bu_blocks; i++)
		ocfs2_journal_dirty(handle, bucket->bu_bhs[i]);
}

static void ocfs2_xattr_bucket_copy_data(struct ocfs2_xattr_bucket *dest,
					 struct ocfs2_xattr_bucket *src)
{
	int i;
	int blocksize = src->bu_inode->i_sb->s_blocksize;

	BUG_ON(dest->bu_blocks != src->bu_blocks);
	BUG_ON(dest->bu_inode != src->bu_inode);

	for (i = 0; i < src->bu_blocks; i++) {
		memcpy(bucket_block(dest, i), bucket_block(src, i),
		       blocksize);
	}
}

static int ocfs2_validate_xattr_block(struct super_block *sb,
				      struct buffer_head *bh)
{
	int rc;
	struct ocfs2_xattr_block *xb =
		(struct ocfs2_xattr_block *)bh->b_data;

	trace_ocfs2_validate_xattr_block((unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running.  We know any error is
	 * local to this block.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &xb->xb_check);
	if (rc)
		return rc;

	/*
	 * Errors after here are fatal
	 */

	if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) {
		return ocfs2_error(sb,
				   "Extended attribute block #%llu has bad "
				   "signature %.*s",
				   (unsigned long long)bh->b_blocknr, 7,
				   xb->xb_signature);
	}

	if (le64_to_cpu(xb->xb_blkno) != bh->b_blocknr) {
		return ocfs2_error(sb,
				   "Extended attribute block #%llu has an "
				   "invalid xb_blkno of %llu",
				   (unsigned long long)bh->b_blocknr,
				   (unsigned long long)le64_to_cpu(xb->xb_blkno));
	}

	if (le32_to_cpu(xb->xb_fs_generation) != OCFS2_SB(sb)->fs_generation) {
		return ocfs2_error(sb,
				   "Extended attribute block #%llu has an invalid "
				   "xb_fs_generation of #%u",
				   (unsigned long long)bh->b_blocknr,
				   le32_to_cpu(xb->xb_fs_generation));
	}

	return 0;
}

static int ocfs2_read_xattr_block(struct inode *inode, u64 xb_blkno,
				  struct buffer_head **bh)
{
	int rc;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_block(INODE_CACHE(inode), xb_blkno, &tmp,
			      ocfs2_validate_xattr_block);

	/* If ocfs2_read_block() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;

	return rc;
}

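/*
 * Map an on-disk name index back to its handler's prefix string
 * (e.g. "user."); an unknown index yields NULL and the entry is
 * skipped when listing.
 */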
static inline const char *ocfs2_xattr_prefix(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < OCFS2_XATTR_MAX)
		handler = ocfs2_xattr_handler_map[name_index];

	return handler ? handler->prefix : NULL;
}

static u32 ocfs2_xattr_name_hash(struct inode *inode,
				 const char *name,
				 int name_len)
{
	/* Get hash value of uuid from super block */
	u32 hash = OCFS2_SB(inode->i_sb)->uuid_hash;
	int i;

	/* hash extended attribute name */
	for (i = 0; i < name_len; i++) {
		hash = (hash << OCFS2_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - OCFS2_HASH_SHIFT)) ^
		       *name++;
	}

	return hash;
}

static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len)
{
	return namevalue_size(name_len, value_len) +
		sizeof(struct ocfs2_xattr_entry);
}

static int ocfs2_xi_entry_usage(struct ocfs2_xattr_info *xi)
{
	return namevalue_size_xi(xi) +
		sizeof(struct ocfs2_xattr_entry);
}

static int ocfs2_xe_entry_usage(struct ocfs2_xattr_entry *xe)
{
	return namevalue_size_xe(xe) +
		sizeof(struct ocfs2_xattr_entry);
}

int ocfs2_calc_security_init(struct inode *dir,
			     struct ocfs2_security_xattr_info *si,
			     int *want_clusters,
			     int *xattr_credits,
			     struct ocfs2_alloc_context **xattr_ac)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
	int s_size = ocfs2_xattr_entry_real_size(strlen(si->name),
						 si->value_len);

	/*
	 * The max space of a security xattr taken inline is
	 * 256(name) + 80(value) + 16(entry) = 352 bytes,
	 * so reserving one metadata block for it is ok.
	 */
	if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE ||
	    s_size > OCFS2_XATTR_FREE_IN_IBODY) {
		ret = ocfs2_reserve_new_metadata_blocks(osb, 1, xattr_ac);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}
		*xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
	}

	/* reserve clusters for xattr value which will be set in B tree */
	if (si->value_len > OCFS2_XATTR_INLINE_SIZE) {
		int new_clusters = ocfs2_clusters_for_bytes(dir->i_sb,
							    si->value_len);

		*xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
							   new_clusters);
		*want_clusters += new_clusters;
	}
	return ret;
}

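/*
 * Work out how many clusters, metadata blocks and journal credits the
 * initial security xattr and any inherited default ACL of a new inode
 * will need, so the caller can reserve them up front.
 */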
int ocfs2_calc_xattr_init(struct inode *dir,
			  struct buffer_head *dir_bh,
			  umode_t mode,
			  struct ocfs2_security_xattr_info *si,
			  int *want_clusters,
			  int *xattr_credits,
			  int *want_meta)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
	int s_size = 0, a_size = 0, acl_len = 0, new_clusters;

	if (si->enable)
		s_size = ocfs2_xattr_entry_real_size(strlen(si->name),
						     si->value_len);

	if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
		acl_len = ocfs2_xattr_get_nolock(dir, dir_bh,
					OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT,
					"", NULL, 0);
		if (acl_len > 0) {
			a_size = ocfs2_xattr_entry_real_size(0, acl_len);
			if (S_ISDIR(mode))
				a_size <<= 1;
		} else if (acl_len != 0 && acl_len != -ENODATA) {
			mlog_errno(ret);
			return ret;
		}
	}

	if (!(s_size + a_size))
		return ret;

	/*
	 * The max space of a security xattr taken inline is
	 * 256(name) + 80(value) + 16(entry) = 352 bytes,
	 * the max space of an acl xattr taken inline is
	 * 80(value) + 16(entry) * 2(if directory) = 192 bytes.
	 * When blocksize = 512, we may reserve one more cluster for
	 * the xattr bucket, otherwise reserving one metadata block
	 * for them is ok.
	 * If this is a new directory with inline data,
	 * we choose to reserve the entire inline area for
	 * directory contents and force an external xattr block.
	 */
	if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE ||
	    (S_ISDIR(mode) && ocfs2_supports_inline_data(osb)) ||
	    (s_size + a_size) > OCFS2_XATTR_FREE_IN_IBODY) {
		*want_meta = *want_meta + 1;
		*xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
	}

	if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE &&
	    (s_size + a_size) > OCFS2_XATTR_FREE_IN_BLOCK(dir)) {
		*want_clusters += 1;
		*xattr_credits += ocfs2_blocks_per_xattr_bucket(dir->i_sb);
	}

	/*
	 * reserve credits and clusters for xattrs which have large
	 * values and have to be set outside
	 */
	if (si->enable && si->value_len > OCFS2_XATTR_INLINE_SIZE) {
		new_clusters = ocfs2_clusters_for_bytes(dir->i_sb,
							si->value_len);
		*xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
							   new_clusters);
		*want_clusters += new_clusters;
	}
	if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL &&
	    acl_len > OCFS2_XATTR_INLINE_SIZE) {
		/* for a directory, it has both DEFAULT and ACCESS ACLs */
		new_clusters = (S_ISDIR(mode) ? 2 : 1) *
				ocfs2_clusters_for_bytes(dir->i_sb, acl_len);
		*xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
							   new_clusters);
		*want_clusters += new_clusters;
	}

	return ret;
}

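/*
 * Grow the allocation backing an xattr value: journal the value buffer,
 * ask ocfs2_add_clusters_in_btree() for more clusters, and keep looping,
 * extending the transaction credits whenever the allocator requests a
 * restart, until clusters_to_add is satisfied.
 */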
static int ocfs2_xattr_extend_allocation(struct inode *inode,
					 u32 clusters_to_add,
					 struct ocfs2_xattr_value_buf *vb,
					 struct ocfs2_xattr_set_ctxt *ctxt)
{
	int status = 0, credits;
	handle_t *handle = ctxt->handle;
	enum ocfs2_alloc_restarted why;
	u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters);
	struct ocfs2_extent_tree et;

	ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);

	while (clusters_to_add) {
		trace_ocfs2_xattr_extend_allocation(clusters_to_add);

		status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
				       OCFS2_JOURNAL_ACCESS_WRITE);
		if (status < 0) {
			mlog_errno(status);
			break;
		}

		prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
		status = ocfs2_add_clusters_in_btree(handle,
						     &et,
						     &logical_start,
						     clusters_to_add,
						     0,
						     ctxt->data_ac,
						     ctxt->meta_ac,
						     &why);
		if ((status < 0) && (status != -EAGAIN)) {
			if (status != -ENOSPC)
				mlog_errno(status);
			break;
		}

		ocfs2_journal_dirty(handle, vb->vb_bh);

		clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) -
				   prev_clusters;

		if (why != RESTART_NONE && clusters_to_add) {
			/*
			 * We can only fail in case the alloc file doesn't give
			 * up enough clusters.
			 */
			BUG_ON(why == RESTART_META);

			credits = ocfs2_calc_extend_credits(inode->i_sb,
							    &vb->vb_xv->xr_list);
			status = ocfs2_extend_trans(handle, credits);
			if (status < 0) {
				status = -ENOMEM;
				mlog_errno(status);
				break;
			}
		}
	}

	return status;
}

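/*
 * Punch a range of clusters out of an xattr value tree: remove the extent
 * from the value's extent list, then either drop a refcount (for
 * refcounted extents) or queue the clusters for deallocation.
 */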
static int __ocfs2_remove_xattr_range(struct inode *inode,
				      struct ocfs2_xattr_value_buf *vb,
				      u32 cpos, u32 phys_cpos, u32 len,
				      unsigned int ext_flags,
				      struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret;
	u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
	handle_t *handle = ctxt->handle;
	struct ocfs2_extent_tree et;

	ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);

	ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
			    OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac,
				  &ctxt->dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	le32_add_cpu(&vb->vb_xv->xr_clusters, -len);
	ocfs2_journal_dirty(handle, vb->vb_bh);

	if (ext_flags & OCFS2_EXT_REFCOUNTED)
		ret = ocfs2_decrease_refcount(inode, handle,
					ocfs2_blocks_to_clusters(inode->i_sb,
								 phys_blkno),
					len, ctxt->meta_ac, &ctxt->dealloc, 1);
	else
		ret = ocfs2_cache_cluster_dealloc(&ctxt->dealloc,
						  phys_blkno, len);
	if (ret)
		mlog_errno(ret);

out:
	return ret;
}

static int ocfs2_xattr_shrink_size(struct inode *inode,
				   u32 old_clusters,
				   u32 new_clusters,
				   struct ocfs2_xattr_value_buf *vb,
				   struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret = 0;
	unsigned int ext_flags;
	u32 trunc_len, cpos, phys_cpos, alloc_size;
	u64 block;

	if (old_clusters <= new_clusters)
		return 0;

	cpos = new_clusters;
	trunc_len = old_clusters - new_clusters;
	while (trunc_len) {
		ret = ocfs2_xattr_get_clusters(inode, cpos, &phys_cpos,
					       &alloc_size,
					       &vb->vb_xv->xr_list, &ext_flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (alloc_size > trunc_len)
			alloc_size = trunc_len;

		ret = __ocfs2_remove_xattr_range(inode, vb, cpos,
						 phys_cpos, alloc_size,
						 ext_flags, ctxt);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		block = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
		ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode),
						       block, alloc_size);
		cpos += alloc_size;
		trunc_len -= alloc_size;
	}

out:
	return ret;
}

static int ocfs2_xattr_value_truncate(struct inode *inode,
				      struct ocfs2_xattr_value_buf *vb,
				      int len,
				      struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret;
	u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, len);
	u32 old_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);

	if (new_clusters == old_clusters)
		return 0;

	if (new_clusters > old_clusters)
		ret = ocfs2_xattr_extend_allocation(inode,
						    new_clusters - old_clusters,
						    vb, ctxt);
	else
		ret = ocfs2_xattr_shrink_size(inode,
					      old_clusters, new_clusters,
					      vb, ctxt);

	return ret;
}

static int ocfs2_xattr_list_entry(char *buffer, size_t size,
				  size_t *result, const char *prefix,
				  const char *name, int name_len)
{
	char *p = buffer + *result;
	int prefix_len = strlen(prefix);
	int total_len = prefix_len + name_len + 1;

	*result += total_len;

	/* we are just looking for how big our buffer needs to be */
	if (!size)
		return 0;

	if (*result > size)
		return -ERANGE;

	memcpy(p, prefix, prefix_len);
	memcpy(p + prefix_len, name, name_len);
	p[prefix_len + name_len] = '\0';

	return 0;
}

static int ocfs2_xattr_list_entries(struct inode *inode,
				    struct ocfs2_xattr_header *header,
				    char *buffer, size_t buffer_size)
{
	size_t result = 0;
	int i, type, ret;
	const char *prefix, *name;

	for (i = 0 ; i < le16_to_cpu(header->xh_count); i++) {
		struct ocfs2_xattr_entry *entry = &header->xh_entries[i];
		type = ocfs2_xattr_get_type(entry);
		prefix = ocfs2_xattr_prefix(type);

		if (prefix) {
			name = (const char *)header +
				le16_to_cpu(entry->xe_name_offset);

			ret = ocfs2_xattr_list_entry(buffer, buffer_size,
						     &result, prefix, name,
						     entry->xe_name_len);
			if (ret)
				return ret;
		}
	}

	return result;
}

int ocfs2_has_inline_xattr_value_outside(struct inode *inode,
					 struct ocfs2_dinode *di)
{
	struct ocfs2_xattr_header *xh;
	int i;

	xh = (struct ocfs2_xattr_header *)
		 ((void *)di + inode->i_sb->s_blocksize -
		 le16_to_cpu(di->i_xattr_inline_size));

	for (i = 0; i < le16_to_cpu(xh->xh_count); i++)
		if (!ocfs2_xattr_is_local(&xh->xh_entries[i]))
			return 1;

	return 0;
}

static int ocfs2_xattr_ibody_list(struct inode *inode,
				  struct ocfs2_dinode *di,
				  char *buffer,
				  size_t buffer_size)
{
	struct ocfs2_xattr_header *header = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	int ret = 0;

	if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL))
		return ret;

	header = (struct ocfs2_xattr_header *)
		 ((void *)di + inode->i_sb->s_blocksize -
		 le16_to_cpu(di->i_xattr_inline_size));

	ret = ocfs2_xattr_list_entries(inode, header, buffer, buffer_size);

	return ret;
}

static int ocfs2_xattr_block_list(struct inode *inode,
				  struct ocfs2_dinode *di,
				  char *buffer,
				  size_t buffer_size)
{
	struct buffer_head *blk_bh = NULL;
	struct ocfs2_xattr_block *xb;
	int ret = 0;

	if (!di->i_xattr_loc)
		return ret;

	ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
				     &blk_bh);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
	if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
		struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header;
		ret = ocfs2_xattr_list_entries(inode, header,
					       buffer, buffer_size);
	} else
		ret = ocfs2_xattr_tree_list_index_block(inode, blk_bh,
							buffer, buffer_size);

	brelse(blk_bh);

	return ret;
}

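/*
 * listxattr(2) entry point: under a read inode lock, list the names kept
 * in the inode body first, then the names kept in the external xattr
 * block, concatenating both into the caller's buffer.
 */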
ssize_t ocfs2_listxattr(struct dentry *dentry,
			char *buffer,
			size_t size)
{
	int ret = 0, i_ret = 0, b_ret = 0;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(d_inode(dentry));

	if (!ocfs2_supports_xattr(OCFS2_SB(dentry->d_sb)))
		return -EOPNOTSUPP;

	if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
		return ret;

	ret = ocfs2_inode_lock(d_inode(dentry), &di_bh, 0);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;

	down_read(&oi->ip_xattr_sem);
	i_ret = ocfs2_xattr_ibody_list(d_inode(dentry), di, buffer, size);
	if (i_ret < 0)
		b_ret = 0;
	else {
		if (buffer) {
			buffer += i_ret;
			size -= i_ret;
		}
		b_ret = ocfs2_xattr_block_list(d_inode(dentry), di,
					       buffer, size);
		if (b_ret < 0)
			i_ret = 0;
	}
	up_read(&oi->ip_xattr_sem);
	ocfs2_inode_unlock(d_inode(dentry), 0);

	brelse(di_bh);

	return i_ret + b_ret;
}

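/*
 * Linear search of the entry array in xs->header: compare the name index
 * (type) first, then the name length, then the name bytes themselves.
 * On a match, xs->here is left pointing at the entry.
 */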
static int ocfs2_xattr_find_entry(int name_index,
				  const char *name,
				  struct ocfs2_xattr_search *xs)
{
	struct ocfs2_xattr_entry *entry;
	size_t name_len;
	int i, cmp = 1;

	if (name == NULL)
		return -EINVAL;

	name_len = strlen(name);
	entry = xs->here;
	for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
		cmp = name_index - ocfs2_xattr_get_type(entry);
		if (!cmp)
			cmp = name_len - entry->xe_name_len;
		if (!cmp)
			cmp = memcmp(name, (xs->base +
				     le16_to_cpu(entry->xe_name_offset)),
				     name_len);
		if (cmp == 0)
			break;
		entry += 1;
	}
	xs->here = entry;

	return cmp ? -ENODATA : 0;
}

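/*
 * Read an xattr value stored outside the inode or xattr block: walk the
 * value root's extent list cluster by cluster and copy the data block by
 * block into the caller's buffer.
 */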
static int ocfs2_xattr_get_value_outside(struct inode *inode,
					 struct ocfs2_xattr_value_root *xv,
					 void *buffer,
					 size_t len)
{
	u32 cpos, p_cluster, num_clusters, bpc, clusters;
	u64 blkno;
	int i, ret = 0;
	size_t cplen, blocksize;
	struct buffer_head *bh = NULL;
	struct ocfs2_extent_list *el;

	el = &xv->xr_list;
	clusters = le32_to_cpu(xv->xr_clusters);
	bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
	blocksize = inode->i_sb->s_blocksize;

	cpos = 0;
	while (cpos < clusters) {
		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
					       &num_clusters, el, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
		/* Copy ocfs2_xattr_value */
		for (i = 0; i < num_clusters * bpc; i++, blkno++) {
			ret = ocfs2_read_block(INODE_CACHE(inode), blkno,
					       &bh, NULL);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			cplen = len >= blocksize ? blocksize : len;
			memcpy(buffer, bh->b_data, cplen);
			len -= cplen;
			buffer += cplen;

			brelse(bh);
			bh = NULL;
			if (len == 0)
				break;
		}
		cpos += num_clusters;
	}
out:
	return ret;
}

static int ocfs2_xattr_ibody_get(struct inode *inode,
				 int name_index,
				 const char *name,
				 void *buffer,
				 size_t buffer_size,
				 struct ocfs2_xattr_search *xs)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
	struct ocfs2_xattr_value_root *xv;
	size_t size;
	int ret = 0;

	if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL))
		return -ENODATA;

	xs->end = (void *)di + inode->i_sb->s_blocksize;
	xs->header = (struct ocfs2_xattr_header *)
		(xs->end - le16_to_cpu(di->i_xattr_inline_size));
	xs->base = (void *)xs->header;
	xs->here = xs->header->xh_entries;

	ret = ocfs2_xattr_find_entry(name_index, name, xs);
	if (ret)
		return ret;
	size = le64_to_cpu(xs->here->xe_value_size);
	if (buffer) {
		if (size > buffer_size)
			return -ERANGE;
		if (ocfs2_xattr_is_local(xs->here)) {
			memcpy(buffer, (void *)xs->base +
			       le16_to_cpu(xs->here->xe_name_offset) +
			       OCFS2_XATTR_SIZE(xs->here->xe_name_len), size);
		} else {
			xv = (struct ocfs2_xattr_value_root *)
				(xs->base + le16_to_cpu(
				 xs->here->xe_name_offset) +
				OCFS2_XATTR_SIZE(xs->here->xe_name_len));
			ret = ocfs2_xattr_get_value_outside(inode, xv,
							    buffer, size);
			if (ret < 0) {
				mlog_errno(ret);
				return ret;
			}
		}
	}

	return size;
}

static int ocfs2_xattr_block_get(struct inode *inode,
				 int name_index,
				 const char *name,
				 void *buffer,
				 size_t buffer_size,
				 struct ocfs2_xattr_search *xs)
{
	struct ocfs2_xattr_block *xb;
	struct ocfs2_xattr_value_root *xv;
	size_t size;
	int ret = -ENODATA, name_offset, name_len, i;
	int uninitialized_var(block_off);

	xs->bucket = ocfs2_xattr_bucket_new(inode);
	if (!xs->bucket) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto cleanup;
	}

	ret = ocfs2_xattr_block_find(inode, name_index, name, xs);
	if (ret) {
		mlog_errno(ret);
		goto cleanup;
	}

	if (xs->not_found) {
		ret = -ENODATA;
		goto cleanup;
	}

	xb = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
	size = le64_to_cpu(xs->here->xe_value_size);
	if (buffer) {
		ret = -ERANGE;
		if (size > buffer_size)
			goto cleanup;

		name_offset = le16_to_cpu(xs->here->xe_name_offset);
		name_len = OCFS2_XATTR_SIZE(xs->here->xe_name_len);
		i = xs->here - xs->header->xh_entries;

		if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
			ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
								bucket_xh(xs->bucket),
								i,
								&block_off,
								&name_offset);
			if (ret) {
				mlog_errno(ret);
				goto cleanup;
			}
			xs->base = bucket_block(xs->bucket, block_off);
		}
		if (ocfs2_xattr_is_local(xs->here)) {
			memcpy(buffer, (void *)xs->base +
			       name_offset + name_len, size);
		} else {
			xv = (struct ocfs2_xattr_value_root *)
				(xs->base + name_offset + name_len);
			ret = ocfs2_xattr_get_value_outside(inode, xv,
							    buffer, size);
			if (ret < 0) {
				mlog_errno(ret);
				goto cleanup;
			}
		}
	}
	ret = size;
cleanup:
	ocfs2_xattr_bucket_free(xs->bucket);

	brelse(xs->xattr_bh);
	xs->xattr_bh = NULL;
	return ret;
}

int ocfs2_xattr_get_nolock(struct inode *inode,
			   struct buffer_head *di_bh,
			   int name_index,
			   const char *name,
			   void *buffer,
			   size_t buffer_size)
{
	int ret;
	struct ocfs2_dinode *di = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_xattr_search xis = {
		.not_found = -ENODATA,
	};
	struct ocfs2_xattr_search xbs = {
		.not_found = -ENODATA,
	};

	if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
		return -EOPNOTSUPP;

	if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
		return -ENODATA;

	xis.inode_bh = xbs.inode_bh = di_bh;
	di = (struct ocfs2_dinode *)di_bh->b_data;

	ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer,
				    buffer_size, &xis);
	if (ret == -ENODATA && di->i_xattr_loc)
		ret = ocfs2_xattr_block_get(inode, name_index, name, buffer,
					    buffer_size, &xbs);

	return ret;
}

/* ocfs2_xattr_get()
 *
 * Copy an extended attribute into the buffer provided.
 * Buffer is NULL to compute the size of buffer required.
 */
static int ocfs2_xattr_get(struct inode *inode,
			   int name_index,
			   const char *name,
			   void *buffer,
			   size_t buffer_size)
{
	int ret;
	struct buffer_head *di_bh = NULL;

	ret = ocfs2_inode_lock(inode, &di_bh, 0);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}
	down_read(&OCFS2_I(inode)->ip_xattr_sem);
	ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
				     name, buffer, buffer_size);
	up_read(&OCFS2_I(inode)->ip_xattr_sem);

	ocfs2_inode_unlock(inode, 0);

	brelse(di_bh);

	return ret;
}

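/*
 * Write an xattr value into the clusters already allocated to its value
 * tree: map each logical cluster, journal every block, copy the data in
 * blocksize-sized chunks and zero the tail of the final block.
 */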
static int __ocfs2_xattr_set_value_outside(struct inode *inode,
					   handle_t *handle,
					   struct ocfs2_xattr_value_buf *vb,
					   const void *value,
					   int value_len)
{
	int ret = 0, i, cp_len;
	u16 blocksize = inode->i_sb->s_blocksize;
	u32 p_cluster, num_clusters;
	u32 cpos = 0, bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
	u32 clusters = ocfs2_clusters_for_bytes(inode->i_sb, value_len);
	u64 blkno;
	struct buffer_head *bh = NULL;
	unsigned int ext_flags;
	struct ocfs2_xattr_value_root *xv = vb->vb_xv;

	BUG_ON(clusters > le32_to_cpu(xv->xr_clusters));

	while (cpos < clusters) {
		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
					       &num_clusters, &xv->xr_list,
					       &ext_flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);

		blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);

		for (i = 0; i < num_clusters * bpc; i++, blkno++) {
			ret = ocfs2_read_block(INODE_CACHE(inode), blkno,
					       &bh, NULL);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			ret = ocfs2_journal_access(handle,
						   INODE_CACHE(inode),
						   bh,
						   OCFS2_JOURNAL_ACCESS_WRITE);
			if (ret < 0) {
				mlog_errno(ret);
				goto out;
			}

			cp_len = value_len > blocksize ? blocksize : value_len;
			memcpy(bh->b_data, value, cp_len);
			value_len -= cp_len;
			value += cp_len;
			if (cp_len < blocksize)
				memset(bh->b_data + cp_len, 0,
				       blocksize - cp_len);

			ocfs2_journal_dirty(handle, bh);
			brelse(bh);
			bh = NULL;

			/*
			 * XXX: do we need to empty all the following
			 * blocks in this cluster?
			 */
			if (!value_len)
				break;
		}
		cpos += num_clusters;
	}
out:
	brelse(bh);

	return ret;
}

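/*
 * Entries grow up from the ocfs2_xattr_header while name+value pairs fill
 * down from the end of the storage; free_start is the lowest name+value
 * offset, so the usable free space is whatever lies between the two,
 * less the OCFS2_XATTR_HEADER_GAP reserve.
 */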
static int ocfs2_xa_check_space_helper(int needed_space, int free_start,
				       int num_entries)
{
	int free_space;

	if (!needed_space)
		return 0;

	free_space = free_start -
		sizeof(struct ocfs2_xattr_header) -
		(num_entries * sizeof(struct ocfs2_xattr_entry)) -
		OCFS2_XATTR_HEADER_GAP;
	if (free_space < 0)
		return -EIO;
	if (free_space < needed_space)
		return -ENOSPC;

	return 0;
}

static int ocfs2_xa_journal_access(handle_t *handle, struct ocfs2_xa_loc *loc,
				   int type)
{
	return loc->xl_ops->xlo_journal_access(handle, loc, type);
}

static void ocfs2_xa_journal_dirty(handle_t *handle, struct ocfs2_xa_loc *loc)
{
	loc->xl_ops->xlo_journal_dirty(handle, loc);
}

/* Give a pointer into the storage for the given offset */
static void *ocfs2_xa_offset_pointer(struct ocfs2_xa_loc *loc, int offset)
{
	BUG_ON(offset >= loc->xl_size);
	return loc->xl_ops->xlo_offset_pointer(loc, offset);
}

/*
 * Wipe the name+value pair and allow the storage to reclaim it.  This
 * must be followed by either removal of the entry or a call to
 * ocfs2_xa_add_namevalue().
 */
static void ocfs2_xa_wipe_namevalue(struct ocfs2_xa_loc *loc)
{
	loc->xl_ops->xlo_wipe_namevalue(loc);
}

/*
 * Find lowest offset to a name+value pair.  This is the start of our
 * downward-growing free space.
 */
static int ocfs2_xa_get_free_start(struct ocfs2_xa_loc *loc)
{
	return loc->xl_ops->xlo_get_free_start(loc);
}

/* Can we reuse loc->xl_entry for xi? */
static int ocfs2_xa_can_reuse_entry(struct ocfs2_xa_loc *loc,
				    struct ocfs2_xattr_info *xi)
{
	return loc->xl_ops->xlo_can_reuse(loc, xi);
}

/* How much free space is needed to set the new value */
static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
				struct ocfs2_xattr_info *xi)
{
	return loc->xl_ops->xlo_check_space(loc, xi);
}

static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
{
	loc->xl_ops->xlo_add_entry(loc, name_hash);
	loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
	/*
	 * We can't leave the new entry's xe_name_offset at zero or
	 * add_namevalue() will go nuts.  We set it to the size of our
	 * storage so that it can never be less than any other entry.
	 */
	loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
}

static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
				   struct ocfs2_xattr_info *xi)
{
	int size = namevalue_size_xi(xi);
	int nameval_offset;
	char *nameval_buf;

	loc->xl_ops->xlo_add_namevalue(loc, size);
	loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
	loc->xl_entry->xe_name_len = xi->xi_name_len;
	ocfs2_xattr_set_type(loc->xl_entry, xi->xi_name_index);
	ocfs2_xattr_set_local(loc->xl_entry,
			      xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE);

	nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
	nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
	memset(nameval_buf, 0, size);
	memcpy(nameval_buf, xi->xi_name, xi->xi_name_len);
}

Joel Becker3fc12af2009-08-18 13:20:27 -07001511static void ocfs2_xa_fill_value_buf(struct ocfs2_xa_loc *loc,
1512 struct ocfs2_xattr_value_buf *vb)
1513{
1514 int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
1515 int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);
1516
1517 /* Value bufs are for value trees */
Joel Becker73857ee2009-08-18 20:26:41 -07001518 BUG_ON(ocfs2_xattr_is_local(loc->xl_entry));
Joel Becker3fc12af2009-08-18 13:20:27 -07001519 BUG_ON(namevalue_size_xe(loc->xl_entry) !=
1520 (name_size + OCFS2_XATTR_ROOT_SIZE));
1521
1522 loc->xl_ops->xlo_fill_value_buf(loc, vb);
1523 vb->vb_xv =
1524 (struct ocfs2_xattr_value_root *)ocfs2_xa_offset_pointer(loc,
1525 nameval_offset +
1526 name_size);
1527}
1528
Joel Beckercf2bc802009-08-18 13:52:38 -07001529static int ocfs2_xa_block_journal_access(handle_t *handle,
1530 struct ocfs2_xa_loc *loc, int type)
1531{
1532 struct buffer_head *bh = loc->xl_storage;
1533 ocfs2_journal_access_func access;
1534
1535 if (loc->xl_size == (bh->b_size -
1536 offsetof(struct ocfs2_xattr_block,
1537 xb_attrs.xb_header)))
1538 access = ocfs2_journal_access_xb;
1539 else
1540 access = ocfs2_journal_access_di;
1541 return access(handle, INODE_CACHE(loc->xl_inode), bh, type);
1542}
1543
1544static void ocfs2_xa_block_journal_dirty(handle_t *handle,
1545 struct ocfs2_xa_loc *loc)
1546{
1547 struct buffer_head *bh = loc->xl_storage;
1548
1549 ocfs2_journal_dirty(handle, bh);
1550}
1551
Joel Becker11179f22009-08-14 16:07:44 -07001552static void *ocfs2_xa_block_offset_pointer(struct ocfs2_xa_loc *loc,
1553 int offset)
1554{
Joel Becker11179f22009-08-14 16:07:44 -07001555 return (char *)loc->xl_header + offset;
1556}
1557
Joel Becker69a3e532009-08-17 12:24:39 -07001558static int ocfs2_xa_block_can_reuse(struct ocfs2_xa_loc *loc,
1559 struct ocfs2_xattr_info *xi)
1560{
1561 /*
1562 * Block storage is strict. If the sizes aren't exact, we will
1563 * remove the old one and reinsert the new.
1564 */
1565 return namevalue_size_xe(loc->xl_entry) ==
1566 namevalue_size_xi(xi);
1567}
1568
1569static int ocfs2_xa_block_get_free_start(struct ocfs2_xa_loc *loc)
1570{
1571 struct ocfs2_xattr_header *xh = loc->xl_header;
1572 int i, count = le16_to_cpu(xh->xh_count);
1573 int offset, free_start = loc->xl_size;
1574
1575 for (i = 0; i < count; i++) {
1576 offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
1577 if (offset < free_start)
1578 free_start = offset;
1579 }
1580
1581 return free_start;
1582}
1583
1584static int ocfs2_xa_block_check_space(struct ocfs2_xa_loc *loc,
1585 struct ocfs2_xattr_info *xi)
1586{
1587 int count = le16_to_cpu(loc->xl_header->xh_count);
1588 int free_start = ocfs2_xa_get_free_start(loc);
1589 int needed_space = ocfs2_xi_entry_usage(xi);
1590
1591 /*
1592 * Block storage will reclaim the original entry before inserting
1593 * the new value, so we only need the difference. If the new
1594 * entry is smaller than the old one, we don't need anything.
1595 */
1596 if (loc->xl_entry) {
1597 /* Don't need space if we're reusing! */
1598 if (ocfs2_xa_can_reuse_entry(loc, xi))
1599 needed_space = 0;
1600 else
1601 needed_space -= ocfs2_xe_entry_usage(loc->xl_entry);
1602 }
1603 if (needed_space < 0)
1604 needed_space = 0;
1605 return ocfs2_xa_check_space_helper(needed_space, free_start, count);
1606}
1607
Joel Becker11179f22009-08-14 16:07:44 -07001608/*
1609 * Block storage for xattrs keeps the name+value pairs compacted. When
1610 * we remove one, we have to shift any that preceded it towards the end.
1611 */
1612static void ocfs2_xa_block_wipe_namevalue(struct ocfs2_xa_loc *loc)
1613{
1614 int i, offset;
1615 int namevalue_offset, first_namevalue_offset, namevalue_size;
1616 struct ocfs2_xattr_entry *entry = loc->xl_entry;
1617 struct ocfs2_xattr_header *xh = loc->xl_header;
Joel Becker11179f22009-08-14 16:07:44 -07001618 int count = le16_to_cpu(xh->xh_count);
1619
1620 namevalue_offset = le16_to_cpu(entry->xe_name_offset);
Joel Becker199799a2009-08-14 19:04:15 -07001621 namevalue_size = namevalue_size_xe(entry);
Joel Becker69a3e532009-08-17 12:24:39 -07001622 first_namevalue_offset = ocfs2_xa_get_free_start(loc);
Joel Becker11179f22009-08-14 16:07:44 -07001623
1624 /* Shift the name+value pairs */
1625 memmove((char *)xh + first_namevalue_offset + namevalue_size,
1626 (char *)xh + first_namevalue_offset,
1627 namevalue_offset - first_namevalue_offset);
1628 memset((char *)xh + first_namevalue_offset, 0, namevalue_size);
1629
1630 /* Now tell xh->xh_entries about it */
1631 for (i = 0; i < count; i++) {
1632 offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
Tao Madfe4d3d2010-03-19 15:04:23 +08001633 if (offset <= namevalue_offset)
Joel Becker11179f22009-08-14 16:07:44 -07001634 le16_add_cpu(&xh->xh_entries[i].xe_name_offset,
1635 namevalue_size);
1636 }
1637
1638 /*
1639 * Note that we don't update xh_free_start or xh_name_value_len
1640 * because they're not used in block-stored xattrs.
1641 */
1642}
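/*
 * Illustration with made-up offsets: say the pair being wiped starts at
 * offset 3200 and is 200 bytes long, and the lowest pair in the block
 * starts at 3000 (the free start).  The memmove above copies the bytes at
 * [3000,3200) up to [3200,3400), the memset zeroes [3000,3200), and every
 * entry whose xe_name_offset was <= 3200 has its offset bumped by 200.
 * The free start therefore moves from 3000 to 3200.
 */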
1643
Joel Becker69a3e532009-08-17 12:24:39 -07001644static void ocfs2_xa_block_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
1645{
1646 int count = le16_to_cpu(loc->xl_header->xh_count);
1647 loc->xl_entry = &(loc->xl_header->xh_entries[count]);
1648 le16_add_cpu(&loc->xl_header->xh_count, 1);
1649 memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
1650}
1651
1652static void ocfs2_xa_block_add_namevalue(struct ocfs2_xa_loc *loc, int size)
1653{
1654 int free_start = ocfs2_xa_get_free_start(loc);
1655
1656 loc->xl_entry->xe_name_offset = cpu_to_le16(free_start - size);
1657}
1658
Joel Becker3fc12af2009-08-18 13:20:27 -07001659static void ocfs2_xa_block_fill_value_buf(struct ocfs2_xa_loc *loc,
1660 struct ocfs2_xattr_value_buf *vb)
1661{
1662 struct buffer_head *bh = loc->xl_storage;
1663
1664 if (loc->xl_size == (bh->b_size -
1665 offsetof(struct ocfs2_xattr_block,
1666 xb_attrs.xb_header)))
1667 vb->vb_access = ocfs2_journal_access_xb;
1668 else
1669 vb->vb_access = ocfs2_journal_access_di;
1670 vb->vb_bh = bh;
1671}
1672
Joel Becker11179f22009-08-14 16:07:44 -07001673/*
1674 * Operations for xattrs stored in blocks. This includes inline inode
1675 * storage and unindexed ocfs2_xattr_blocks.
1676 */
1677static const struct ocfs2_xa_loc_operations ocfs2_xa_block_loc_ops = {
Joel Beckercf2bc802009-08-18 13:52:38 -07001678 .xlo_journal_access = ocfs2_xa_block_journal_access,
1679 .xlo_journal_dirty = ocfs2_xa_block_journal_dirty,
Joel Becker11179f22009-08-14 16:07:44 -07001680 .xlo_offset_pointer = ocfs2_xa_block_offset_pointer,
Joel Becker69a3e532009-08-17 12:24:39 -07001681 .xlo_check_space = ocfs2_xa_block_check_space,
1682 .xlo_can_reuse = ocfs2_xa_block_can_reuse,
1683 .xlo_get_free_start = ocfs2_xa_block_get_free_start,
Joel Becker11179f22009-08-14 16:07:44 -07001684 .xlo_wipe_namevalue = ocfs2_xa_block_wipe_namevalue,
Joel Becker69a3e532009-08-17 12:24:39 -07001685 .xlo_add_entry = ocfs2_xa_block_add_entry,
1686 .xlo_add_namevalue = ocfs2_xa_block_add_namevalue,
Joel Becker3fc12af2009-08-18 13:20:27 -07001687 .xlo_fill_value_buf = ocfs2_xa_block_fill_value_buf,
Joel Becker11179f22009-08-14 16:07:44 -07001688};
1689
Joel Beckercf2bc802009-08-18 13:52:38 -07001690static int ocfs2_xa_bucket_journal_access(handle_t *handle,
1691 struct ocfs2_xa_loc *loc, int type)
1692{
1693 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1694
1695 return ocfs2_xattr_bucket_journal_access(handle, bucket, type);
1696}
1697
1698static void ocfs2_xa_bucket_journal_dirty(handle_t *handle,
1699 struct ocfs2_xa_loc *loc)
1700{
1701 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1702
1703 ocfs2_xattr_bucket_journal_dirty(handle, bucket);
1704}
1705
Joel Becker11179f22009-08-14 16:07:44 -07001706static void *ocfs2_xa_bucket_offset_pointer(struct ocfs2_xa_loc *loc,
1707 int offset)
1708{
1709 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1710 int block, block_offset;
1711
Joel Becker11179f22009-08-14 16:07:44 -07001712 /* The header is at the front of the bucket */
Joel Beckercf2bc802009-08-18 13:52:38 -07001713 block = offset >> loc->xl_inode->i_sb->s_blocksize_bits;
1714 block_offset = offset % loc->xl_inode->i_sb->s_blocksize;
Joel Becker11179f22009-08-14 16:07:44 -07001715
1716 return bucket_block(bucket, block) + block_offset;
1717}
1718
Joel Becker69a3e532009-08-17 12:24:39 -07001719static int ocfs2_xa_bucket_can_reuse(struct ocfs2_xa_loc *loc,
1720 struct ocfs2_xattr_info *xi)
1721{
1722 return namevalue_size_xe(loc->xl_entry) >=
1723 namevalue_size_xi(xi);
1724}
1725
1726static int ocfs2_xa_bucket_get_free_start(struct ocfs2_xa_loc *loc)
1727{
1728 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1729 return le16_to_cpu(bucket_xh(bucket)->xh_free_start);
1730}
1731
1732static int ocfs2_bucket_align_free_start(struct super_block *sb,
1733 int free_start, int size)
1734{
1735 /*
1736 * We need to make sure that the name+value pair fits within
1737 * one block.
1738 */
1739 if (((free_start - size) >> sb->s_blocksize_bits) !=
1740 ((free_start - 1) >> sb->s_blocksize_bits))
1741 free_start -= free_start % sb->s_blocksize;
1742
1743 return free_start;
1744}
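/*
 * Example (hypothetical numbers): with 512-byte blocks, a free_start of
 * 1030 and a 60-byte name+value pair, (1030 - 60) >> 9 == 1 while
 * (1030 - 1) >> 9 == 2, so the pair would straddle a block boundary.
 * free_start is therefore rounded down to 1024 and the pair is placed at
 * [964, 1024), entirely inside one block.
 */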
1745
1746static int ocfs2_xa_bucket_check_space(struct ocfs2_xa_loc *loc,
1747 struct ocfs2_xattr_info *xi)
1748{
1749 int rc;
1750 int count = le16_to_cpu(loc->xl_header->xh_count);
1751 int free_start = ocfs2_xa_get_free_start(loc);
1752 int needed_space = ocfs2_xi_entry_usage(xi);
1753 int size = namevalue_size_xi(xi);
Joel Beckercf2bc802009-08-18 13:52:38 -07001754 struct super_block *sb = loc->xl_inode->i_sb;
Joel Becker69a3e532009-08-17 12:24:39 -07001755
1756 /*
1757 * Bucket storage does not reclaim name+value pairs it cannot
1758 * reuse. They live as holes until the bucket fills, and then
1759 * the bucket is defragmented. However, the bucket can reclaim
1760 * the ocfs2_xattr_entry.
1761 */
1762 if (loc->xl_entry) {
1763 /* Don't need space if we're reusing! */
1764 if (ocfs2_xa_can_reuse_entry(loc, xi))
1765 needed_space = 0;
1766 else
1767 needed_space -= sizeof(struct ocfs2_xattr_entry);
1768 }
1769 BUG_ON(needed_space < 0);
1770
1771 if (free_start < size) {
1772 if (needed_space)
1773 return -ENOSPC;
1774 } else {
1775 /*
1776 * First we check if it would fit in the first place.
1777 * Below, we align the free start to a block. This may
1778 * slide us below the minimum gap. By checking unaligned
1779 * first, we avoid that error.
1780 */
1781 rc = ocfs2_xa_check_space_helper(needed_space, free_start,
1782 count);
1783 if (rc)
1784 return rc;
1785 free_start = ocfs2_bucket_align_free_start(sb, free_start,
1786 size);
1787 }
1788 return ocfs2_xa_check_space_helper(needed_space, free_start, count);
1789}
1790
Joel Becker11179f22009-08-14 16:07:44 -07001791static void ocfs2_xa_bucket_wipe_namevalue(struct ocfs2_xa_loc *loc)
1792{
Joel Becker199799a2009-08-14 19:04:15 -07001793 le16_add_cpu(&loc->xl_header->xh_name_value_len,
1794 -namevalue_size_xe(loc->xl_entry));
Joel Becker11179f22009-08-14 16:07:44 -07001795}
1796
Joel Becker69a3e532009-08-17 12:24:39 -07001797static void ocfs2_xa_bucket_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
1798{
1799 struct ocfs2_xattr_header *xh = loc->xl_header;
1800 int count = le16_to_cpu(xh->xh_count);
1801 int low = 0, high = count - 1, tmp;
1802 struct ocfs2_xattr_entry *tmp_xe;
1803
1804 /*
1805 * We keep buckets sorted by name_hash, so we need to find
1806 * our insert place.
1807 */
1808 while (low <= high && count) {
1809 tmp = (low + high) / 2;
1810 tmp_xe = &xh->xh_entries[tmp];
1811
1812 if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash))
1813 low = tmp + 1;
1814 else if (name_hash < le32_to_cpu(tmp_xe->xe_name_hash))
1815 high = tmp - 1;
1816 else {
1817 low = tmp;
1818 break;
1819 }
1820 }
1821
1822 if (low != count)
1823 memmove(&xh->xh_entries[low + 1],
1824 &xh->xh_entries[low],
1825 ((count - low) * sizeof(struct ocfs2_xattr_entry)));
1826
1827 le16_add_cpu(&xh->xh_count, 1);
1828 loc->xl_entry = &xh->xh_entries[low];
1829 memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
1830}
1831
1832static void ocfs2_xa_bucket_add_namevalue(struct ocfs2_xa_loc *loc, int size)
1833{
1834 int free_start = ocfs2_xa_get_free_start(loc);
1835 struct ocfs2_xattr_header *xh = loc->xl_header;
Joel Beckercf2bc802009-08-18 13:52:38 -07001836 struct super_block *sb = loc->xl_inode->i_sb;
Joel Becker69a3e532009-08-17 12:24:39 -07001837 int nameval_offset;
1838
1839 free_start = ocfs2_bucket_align_free_start(sb, free_start, size);
1840 nameval_offset = free_start - size;
1841 loc->xl_entry->xe_name_offset = cpu_to_le16(nameval_offset);
1842 xh->xh_free_start = cpu_to_le16(nameval_offset);
1843 le16_add_cpu(&xh->xh_name_value_len, size);
1844
1845}
1846
Joel Becker3fc12af2009-08-18 13:20:27 -07001847static void ocfs2_xa_bucket_fill_value_buf(struct ocfs2_xa_loc *loc,
1848 struct ocfs2_xattr_value_buf *vb)
1849{
1850 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
Joel Beckercf2bc802009-08-18 13:52:38 -07001851 struct super_block *sb = loc->xl_inode->i_sb;
Joel Becker3fc12af2009-08-18 13:20:27 -07001852 int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
1853 int size = namevalue_size_xe(loc->xl_entry);
1854 int block_offset = nameval_offset >> sb->s_blocksize_bits;
1855
1856 /* Values are not allowed to straddle block boundaries */
1857 BUG_ON(block_offset !=
1858 ((nameval_offset + size - 1) >> sb->s_blocksize_bits));
1859 /* We expect the bucket to be filled in */
1860 BUG_ON(!bucket->bu_bhs[block_offset]);
1861
1862 vb->vb_access = ocfs2_journal_access;
1863 vb->vb_bh = bucket->bu_bhs[block_offset];
1864}
1865
Joel Becker11179f22009-08-14 16:07:44 -07001866/* Operations for xattrs stored in buckets. */
1867static const struct ocfs2_xa_loc_operations ocfs2_xa_bucket_loc_ops = {
Joel Beckercf2bc802009-08-18 13:52:38 -07001868 .xlo_journal_access = ocfs2_xa_bucket_journal_access,
1869 .xlo_journal_dirty = ocfs2_xa_bucket_journal_dirty,
Joel Becker11179f22009-08-14 16:07:44 -07001870 .xlo_offset_pointer = ocfs2_xa_bucket_offset_pointer,
Joel Becker69a3e532009-08-17 12:24:39 -07001871 .xlo_check_space = ocfs2_xa_bucket_check_space,
1872 .xlo_can_reuse = ocfs2_xa_bucket_can_reuse,
1873 .xlo_get_free_start = ocfs2_xa_bucket_get_free_start,
Joel Becker11179f22009-08-14 16:07:44 -07001874 .xlo_wipe_namevalue = ocfs2_xa_bucket_wipe_namevalue,
Joel Becker69a3e532009-08-17 12:24:39 -07001875 .xlo_add_entry = ocfs2_xa_bucket_add_entry,
1876 .xlo_add_namevalue = ocfs2_xa_bucket_add_namevalue,
Joel Becker3fc12af2009-08-18 13:20:27 -07001877 .xlo_fill_value_buf = ocfs2_xa_bucket_fill_value_buf,
Joel Becker11179f22009-08-14 16:07:44 -07001878};
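/*
 * To summarize the two formats: block storage keeps name+value pairs
 * compacted (wiping a pair shifts the remaining ones) and only reuses an
 * entry when the sizes match exactly, while buckets leave holes that are
 * reclaimed when the bucket is later defragmented, reuse an entry whenever
 * the new pair is no larger than the old one, keep their entries sorted by
 * name hash, and additionally maintain xh_free_start and
 * xh_name_value_len in the bucket header.
 */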
1879
Joel Becker399ff3a72009-09-01 18:38:27 -07001880static unsigned int ocfs2_xa_value_clusters(struct ocfs2_xa_loc *loc)
1881{
1882 struct ocfs2_xattr_value_buf vb;
1883
1884 if (ocfs2_xattr_is_local(loc->xl_entry))
1885 return 0;
1886
1887 ocfs2_xa_fill_value_buf(loc, &vb);
1888 return le32_to_cpu(vb.vb_xv->xr_clusters);
1889}
1890
Joel Becker73857ee2009-08-18 20:26:41 -07001891static int ocfs2_xa_value_truncate(struct ocfs2_xa_loc *loc, u64 bytes,
1892 struct ocfs2_xattr_set_ctxt *ctxt)
1893{
1894 int trunc_rc, access_rc;
1895 struct ocfs2_xattr_value_buf vb;
1896
1897 ocfs2_xa_fill_value_buf(loc, &vb);
1898 trunc_rc = ocfs2_xattr_value_truncate(loc->xl_inode, &vb, bytes,
1899 ctxt);
1900
1901 /*
1902 * The caller of ocfs2_xa_value_truncate() has already called
 1903	 * ocfs2_xa_journal_access() on the loc. However, the truncate code
1904 * calls ocfs2_extend_trans(). This may commit the previous
1905 * transaction and open a new one. If this is a bucket, truncate
1906 * could leave only vb->vb_bh set up for journaling. Meanwhile,
1907 * the caller is expecting to dirty the entire bucket. So we must
1908 * reset the journal work. We do this even if truncate has failed,
1909 * as it could have failed after committing the extend.
1910 */
1911 access_rc = ocfs2_xa_journal_access(ctxt->handle, loc,
1912 OCFS2_JOURNAL_ACCESS_WRITE);
1913
1914 /* Errors in truncate take precedence */
1915 return trunc_rc ? trunc_rc : access_rc;
1916}
1917
Joel Becker11179f22009-08-14 16:07:44 -07001918static void ocfs2_xa_remove_entry(struct ocfs2_xa_loc *loc)
1919{
Joel Beckerbde1e542009-08-14 16:58:38 -07001920 int index, count;
1921 struct ocfs2_xattr_header *xh = loc->xl_header;
1922 struct ocfs2_xattr_entry *entry = loc->xl_entry;
1923
Joel Becker11179f22009-08-14 16:07:44 -07001924 ocfs2_xa_wipe_namevalue(loc);
Joel Beckerbde1e542009-08-14 16:58:38 -07001925 loc->xl_entry = NULL;
1926
1927 le16_add_cpu(&xh->xh_count, -1);
1928 count = le16_to_cpu(xh->xh_count);
1929
1930 /*
1931 * Only zero out the entry if there are more remaining. This is
1932 * important for an empty bucket, as it keeps track of the
1933 * bucket's hash value. It doesn't hurt empty block storage.
1934 */
1935 if (count) {
1936 index = ((char *)entry - (char *)&xh->xh_entries) /
1937 sizeof(struct ocfs2_xattr_entry);
1938 memmove(&xh->xh_entries[index], &xh->xh_entries[index + 1],
1939 (count - index) * sizeof(struct ocfs2_xattr_entry));
1940 memset(&xh->xh_entries[count], 0,
1941 sizeof(struct ocfs2_xattr_entry));
1942 }
Joel Becker11179f22009-08-14 16:07:44 -07001943}
1944
Joel Becker399ff3a72009-09-01 18:38:27 -07001945/*
1946 * If we have a problem adjusting the size of an external value during
1947 * ocfs2_xa_prepare_entry() or ocfs2_xa_remove(), we may have an xattr
1948 * in an intermediate state. For example, the value may be partially
1949 * truncated.
1950 *
1951 * If the value tree hasn't changed, the extend/truncate went nowhere.
1952 * We have nothing to do. The caller can treat it as a straight error.
1953 *
1954 * If the value tree got partially truncated, we now have a corrupted
1955 * extended attribute. We're going to wipe its entry and leak the
1956 * clusters. Better to leak some storage than leave a corrupt entry.
1957 *
1958 * If the value tree grew, it obviously didn't grow enough for the
1959 * new entry. We're not going to try and reclaim those clusters either.
1960 * If there was already an external value there (orig_clusters != 0),
1961 * the new clusters are attached safely and we can just leave the old
1962 * value in place. If there was no external value there, we remove
1963 * the entry.
1964 *
1965 * This way, the xattr block we store in the journal will be consistent.
1966 * If the size change broke because of the journal, no changes will hit
1967 * disk anyway.
1968 */
1969static void ocfs2_xa_cleanup_value_truncate(struct ocfs2_xa_loc *loc,
1970 const char *what,
1971 unsigned int orig_clusters)
1972{
1973 unsigned int new_clusters = ocfs2_xa_value_clusters(loc);
1974 char *nameval_buf = ocfs2_xa_offset_pointer(loc,
1975 le16_to_cpu(loc->xl_entry->xe_name_offset));
1976
1977 if (new_clusters < orig_clusters) {
1978 mlog(ML_ERROR,
1979 "Partial truncate while %s xattr %.*s. Leaking "
1980 "%u clusters and removing the entry\n",
1981 what, loc->xl_entry->xe_name_len, nameval_buf,
1982 orig_clusters - new_clusters);
1983 ocfs2_xa_remove_entry(loc);
1984 } else if (!orig_clusters) {
1985 mlog(ML_ERROR,
1986 "Unable to allocate an external value for xattr "
1987 "%.*s safely. Leaking %u clusters and removing the "
1988 "entry\n",
1989 loc->xl_entry->xe_name_len, nameval_buf,
1990 new_clusters - orig_clusters);
1991 ocfs2_xa_remove_entry(loc);
1992 } else if (new_clusters > orig_clusters)
1993 mlog(ML_ERROR,
1994 "Unable to grow xattr %.*s safely. %u new clusters "
1995 "have been added, but the value will not be "
1996 "modified\n",
1997 loc->xl_entry->xe_name_len, nameval_buf,
1998 new_clusters - orig_clusters);
1999}
2000
Joel Becker73857ee2009-08-18 20:26:41 -07002001static int ocfs2_xa_remove(struct ocfs2_xa_loc *loc,
2002 struct ocfs2_xattr_set_ctxt *ctxt)
2003{
2004 int rc = 0;
Joel Becker399ff3a72009-09-01 18:38:27 -07002005 unsigned int orig_clusters;
Joel Becker73857ee2009-08-18 20:26:41 -07002006
2007 if (!ocfs2_xattr_is_local(loc->xl_entry)) {
Joel Becker399ff3a72009-09-01 18:38:27 -07002008 orig_clusters = ocfs2_xa_value_clusters(loc);
Joel Becker73857ee2009-08-18 20:26:41 -07002009 rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
2010 if (rc) {
2011 mlog_errno(rc);
Joel Becker399ff3a72009-09-01 18:38:27 -07002012 /*
2013 * Since this is remove, we can return 0 if
2014 * ocfs2_xa_cleanup_value_truncate() is going to
2015 * wipe the entry anyway. So we check the
2016 * cluster count as well.
2017 */
2018 if (orig_clusters != ocfs2_xa_value_clusters(loc))
2019 rc = 0;
2020 ocfs2_xa_cleanup_value_truncate(loc, "removing",
2021 orig_clusters);
2022 if (rc)
2023 goto out;
Joel Becker73857ee2009-08-18 20:26:41 -07002024 }
2025 }
2026
2027 ocfs2_xa_remove_entry(loc);
2028
2029out:
2030 return rc;
2031}
2032
2033static void ocfs2_xa_install_value_root(struct ocfs2_xa_loc *loc)
2034{
2035 int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);
2036 char *nameval_buf;
2037
2038 nameval_buf = ocfs2_xa_offset_pointer(loc,
2039 le16_to_cpu(loc->xl_entry->xe_name_offset));
2040 memcpy(nameval_buf + name_size, &def_xv, OCFS2_XATTR_ROOT_SIZE);
2041}
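/*
 * For a value kept outside the storage, the name+value region holds the
 * name followed by an ocfs2_xattr_value_root (copied from the def_xv
 * template earlier in this file) of OCFS2_XATTR_ROOT_SIZE bytes; the
 * value itself lives in the clusters that the root's extent list points
 * to.
 */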
2042
2043/*
2044 * Take an existing entry and make it ready for the new value. This
2045 * won't allocate space, but it may free space. It should be ready for
2046 * ocfs2_xa_prepare_entry() to finish the work.
2047 */
2048static int ocfs2_xa_reuse_entry(struct ocfs2_xa_loc *loc,
2049 struct ocfs2_xattr_info *xi,
2050 struct ocfs2_xattr_set_ctxt *ctxt)
2051{
2052 int rc = 0;
2053 int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
Joel Becker399ff3a72009-09-01 18:38:27 -07002054 unsigned int orig_clusters;
Joel Becker73857ee2009-08-18 20:26:41 -07002055 char *nameval_buf;
2056 int xe_local = ocfs2_xattr_is_local(loc->xl_entry);
2057 int xi_local = xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE;
2058
2059 BUG_ON(OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len) !=
2060 name_size);
2061
2062 nameval_buf = ocfs2_xa_offset_pointer(loc,
2063 le16_to_cpu(loc->xl_entry->xe_name_offset));
2064 if (xe_local) {
2065 memset(nameval_buf + name_size, 0,
2066 namevalue_size_xe(loc->xl_entry) - name_size);
2067 if (!xi_local)
2068 ocfs2_xa_install_value_root(loc);
2069 } else {
Joel Becker399ff3a72009-09-01 18:38:27 -07002070 orig_clusters = ocfs2_xa_value_clusters(loc);
Joel Becker73857ee2009-08-18 20:26:41 -07002071 if (xi_local) {
2072 rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
Joel Becker399ff3a72009-09-01 18:38:27 -07002073 if (rc < 0)
Joel Becker73857ee2009-08-18 20:26:41 -07002074 mlog_errno(rc);
Joel Becker399ff3a72009-09-01 18:38:27 -07002075 else
2076 memset(nameval_buf + name_size, 0,
2077 namevalue_size_xe(loc->xl_entry) -
2078 name_size);
Joel Becker73857ee2009-08-18 20:26:41 -07002079 } else if (le64_to_cpu(loc->xl_entry->xe_value_size) >
2080 xi->xi_value_len) {
2081 rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len,
2082 ctxt);
Joel Becker399ff3a72009-09-01 18:38:27 -07002083 if (rc < 0)
Joel Becker73857ee2009-08-18 20:26:41 -07002084 mlog_errno(rc);
Joel Becker399ff3a72009-09-01 18:38:27 -07002085 }
2086
2087 if (rc) {
2088 ocfs2_xa_cleanup_value_truncate(loc, "reusing",
2089 orig_clusters);
2090 goto out;
Joel Becker73857ee2009-08-18 20:26:41 -07002091 }
2092 }
2093
2094 loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
2095 ocfs2_xattr_set_local(loc->xl_entry, xi_local);
2096
2097out:
2098 return rc;
2099}
2100
Joel Becker69a3e532009-08-17 12:24:39 -07002101/*
2102 * Prepares loc->xl_entry to receive the new xattr. This includes
2103 * properly setting up the name+value pair region. If loc->xl_entry
2104 * already exists, it will take care of modifying it appropriately.
Joel Becker69a3e532009-08-17 12:24:39 -07002105 *
2106 * Note that this modifies the data. You did journal_access already,
2107 * right?
2108 */
2109static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
2110 struct ocfs2_xattr_info *xi,
Joel Becker73857ee2009-08-18 20:26:41 -07002111 u32 name_hash,
2112 struct ocfs2_xattr_set_ctxt *ctxt)
Joel Becker69a3e532009-08-17 12:24:39 -07002113{
2114 int rc = 0;
Joel Becker399ff3a72009-09-01 18:38:27 -07002115 unsigned int orig_clusters;
2116 __le64 orig_value_size = 0;
Joel Becker69a3e532009-08-17 12:24:39 -07002117
Joel Becker69a3e532009-08-17 12:24:39 -07002118 rc = ocfs2_xa_check_space(loc, xi);
2119 if (rc)
2120 goto out;
2121
2122 if (loc->xl_entry) {
2123 if (ocfs2_xa_can_reuse_entry(loc, xi)) {
Joel Becker399ff3a72009-09-01 18:38:27 -07002124 orig_value_size = loc->xl_entry->xe_value_size;
Joel Becker73857ee2009-08-18 20:26:41 -07002125 rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
2126 if (rc)
2127 goto out;
2128 goto alloc_value;
Joel Becker69a3e532009-08-17 12:24:39 -07002129 }
2130
Joel Becker73857ee2009-08-18 20:26:41 -07002131 if (!ocfs2_xattr_is_local(loc->xl_entry)) {
Joel Becker399ff3a72009-09-01 18:38:27 -07002132 orig_clusters = ocfs2_xa_value_clusters(loc);
Joel Becker73857ee2009-08-18 20:26:41 -07002133 rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
2134 if (rc) {
2135 mlog_errno(rc);
Joel Becker399ff3a72009-09-01 18:38:27 -07002136 ocfs2_xa_cleanup_value_truncate(loc,
2137 "overwriting",
2138 orig_clusters);
Joel Becker73857ee2009-08-18 20:26:41 -07002139 goto out;
2140 }
2141 }
Joel Becker69a3e532009-08-17 12:24:39 -07002142 ocfs2_xa_wipe_namevalue(loc);
2143 } else
2144 ocfs2_xa_add_entry(loc, name_hash);
2145
2146 /*
2147 * If we get here, we have a blank entry. Fill it. We grow our
2148 * name+value pair back from the end.
2149 */
2150 ocfs2_xa_add_namevalue(loc, xi);
Joel Becker73857ee2009-08-18 20:26:41 -07002151 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
2152 ocfs2_xa_install_value_root(loc);
2153
2154alloc_value:
2155 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
Joel Becker399ff3a72009-09-01 18:38:27 -07002156 orig_clusters = ocfs2_xa_value_clusters(loc);
Joel Becker73857ee2009-08-18 20:26:41 -07002157 rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt);
Joel Becker399ff3a72009-09-01 18:38:27 -07002158 if (rc < 0) {
Tao Ma5f5261a2010-05-13 22:49:05 +08002159 ctxt->set_abort = 1;
Joel Becker399ff3a72009-09-01 18:38:27 -07002160 ocfs2_xa_cleanup_value_truncate(loc, "growing",
2161 orig_clusters);
Tao Mad5a7df02010-05-10 18:09:47 +08002162 /*
2163 * If we were growing an existing value,
2164 * ocfs2_xa_cleanup_value_truncate() won't remove
2165 * the entry. We need to restore the original value
2166 * size.
2167 */
2168 if (loc->xl_entry) {
2169 BUG_ON(!orig_value_size);
2170 loc->xl_entry->xe_value_size = orig_value_size;
2171 }
Joel Becker73857ee2009-08-18 20:26:41 -07002172 mlog_errno(rc);
Joel Becker399ff3a72009-09-01 18:38:27 -07002173 }
Joel Becker73857ee2009-08-18 20:26:41 -07002174 }
Joel Becker69a3e532009-08-17 12:24:39 -07002175
2176out:
2177 return rc;
2178}
2179
2180/*
Joel Becker73857ee2009-08-18 20:26:41 -07002181 * Store the value portion of the name+value pair. This will skip
2182 * values that are stored externally. Their tree roots were set up
2183 * by ocfs2_xa_prepare_entry().
Joel Becker69a3e532009-08-17 12:24:39 -07002184 */
Joel Becker73857ee2009-08-18 20:26:41 -07002185static int ocfs2_xa_store_value(struct ocfs2_xa_loc *loc,
2186 struct ocfs2_xattr_info *xi,
2187 struct ocfs2_xattr_set_ctxt *ctxt)
Joel Becker69a3e532009-08-17 12:24:39 -07002188{
Joel Becker73857ee2009-08-18 20:26:41 -07002189 int rc = 0;
Joel Becker69a3e532009-08-17 12:24:39 -07002190 int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
2191 int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
Joel Becker69a3e532009-08-17 12:24:39 -07002192 char *nameval_buf;
Joel Becker73857ee2009-08-18 20:26:41 -07002193 struct ocfs2_xattr_value_buf vb;
Joel Becker69a3e532009-08-17 12:24:39 -07002194
Joel Becker69a3e532009-08-17 12:24:39 -07002195 nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
Joel Becker73857ee2009-08-18 20:26:41 -07002196 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
2197 ocfs2_xa_fill_value_buf(loc, &vb);
2198 rc = __ocfs2_xattr_set_value_outside(loc->xl_inode,
2199 ctxt->handle, &vb,
2200 xi->xi_value,
2201 xi->xi_value_len);
2202 } else
2203 memcpy(nameval_buf + name_size, xi->xi_value, xi->xi_value_len);
2204
Joel Becker73857ee2009-08-18 20:26:41 -07002205 return rc;
Joel Becker69a3e532009-08-17 12:24:39 -07002206}
2207
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002208static int ocfs2_xa_set(struct ocfs2_xa_loc *loc,
2209 struct ocfs2_xattr_info *xi,
2210 struct ocfs2_xattr_set_ctxt *ctxt)
2211{
2212 int ret;
2213 u32 name_hash = ocfs2_xattr_name_hash(loc->xl_inode, xi->xi_name,
2214 xi->xi_name_len);
2215
2216 ret = ocfs2_xa_journal_access(ctxt->handle, loc,
2217 OCFS2_JOURNAL_ACCESS_WRITE);
2218 if (ret) {
2219 mlog_errno(ret);
2220 goto out;
2221 }
2222
Joel Becker399ff3a72009-09-01 18:38:27 -07002223 /*
2224 * From here on out, everything is going to modify the buffer a
2225 * little. Errors are going to leave the xattr header in a
2226 * sane state. Thus, even with errors we dirty the sucker.
2227 */
2228
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002229 /* Don't worry, we are never called with !xi_value and !xl_entry */
2230 if (!xi->xi_value) {
2231 ret = ocfs2_xa_remove(loc, ctxt);
Joel Becker399ff3a72009-09-01 18:38:27 -07002232 goto out_dirty;
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002233 }
2234
2235 ret = ocfs2_xa_prepare_entry(loc, xi, name_hash, ctxt);
2236 if (ret) {
2237 if (ret != -ENOSPC)
2238 mlog_errno(ret);
Joel Becker399ff3a72009-09-01 18:38:27 -07002239 goto out_dirty;
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002240 }
2241
2242 ret = ocfs2_xa_store_value(loc, xi, ctxt);
Joel Becker399ff3a72009-09-01 18:38:27 -07002243 if (ret)
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002244 mlog_errno(ret);
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002245
Joel Becker399ff3a72009-09-01 18:38:27 -07002246out_dirty:
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002247 ocfs2_xa_journal_dirty(ctxt->handle, loc);
2248
2249out:
2250 return ret;
2251}
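/*
 * A typical caller looks roughly like this (mirroring
 * ocfs2_xattr_ibody_set() and ocfs2_xattr_block_set() below):
 *
 *	ocfs2_init_dinode_xa_loc(&loc, inode, xs->inode_bh,
 *				 xs->not_found ? NULL : xs->here);
 *	ret = ocfs2_xa_set(&loc, xi, ctxt);
 *	if (!ret)
 *		xs->here = loc.xl_entry;
 */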
2252
Joel Becker11179f22009-08-14 16:07:44 -07002253static void ocfs2_init_dinode_xa_loc(struct ocfs2_xa_loc *loc,
2254 struct inode *inode,
2255 struct buffer_head *bh,
2256 struct ocfs2_xattr_entry *entry)
2257{
2258 struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
2259
Joel Becker139ffac2009-08-19 11:09:17 -07002260 BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_XATTR_FL));
2261
Joel Beckercf2bc802009-08-18 13:52:38 -07002262 loc->xl_inode = inode;
Joel Becker11179f22009-08-14 16:07:44 -07002263 loc->xl_ops = &ocfs2_xa_block_loc_ops;
2264 loc->xl_storage = bh;
2265 loc->xl_entry = entry;
Joel Becker139ffac2009-08-19 11:09:17 -07002266 loc->xl_size = le16_to_cpu(di->i_xattr_inline_size);
Joel Becker11179f22009-08-14 16:07:44 -07002267 loc->xl_header =
2268 (struct ocfs2_xattr_header *)(bh->b_data + bh->b_size -
2269 loc->xl_size);
2270}
2271
2272static void ocfs2_init_xattr_block_xa_loc(struct ocfs2_xa_loc *loc,
Joel Beckercf2bc802009-08-18 13:52:38 -07002273 struct inode *inode,
Joel Becker11179f22009-08-14 16:07:44 -07002274 struct buffer_head *bh,
2275 struct ocfs2_xattr_entry *entry)
2276{
2277 struct ocfs2_xattr_block *xb =
2278 (struct ocfs2_xattr_block *)bh->b_data;
2279
2280 BUG_ON(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED);
2281
Joel Beckercf2bc802009-08-18 13:52:38 -07002282 loc->xl_inode = inode;
Joel Becker11179f22009-08-14 16:07:44 -07002283 loc->xl_ops = &ocfs2_xa_block_loc_ops;
2284 loc->xl_storage = bh;
2285 loc->xl_header = &(xb->xb_attrs.xb_header);
2286 loc->xl_entry = entry;
2287 loc->xl_size = bh->b_size - offsetof(struct ocfs2_xattr_block,
2288 xb_attrs.xb_header);
2289}
2290
2291static void ocfs2_init_xattr_bucket_xa_loc(struct ocfs2_xa_loc *loc,
2292 struct ocfs2_xattr_bucket *bucket,
2293 struct ocfs2_xattr_entry *entry)
2294{
Joel Beckercf2bc802009-08-18 13:52:38 -07002295 loc->xl_inode = bucket->bu_inode;
Joel Becker11179f22009-08-14 16:07:44 -07002296 loc->xl_ops = &ocfs2_xa_bucket_loc_ops;
2297 loc->xl_storage = bucket;
2298 loc->xl_header = bucket_xh(bucket);
2299 loc->xl_entry = entry;
2300 loc->xl_size = OCFS2_XATTR_BUCKET_SIZE;
2301}
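/*
 * The three initializers above differ only in where the storage lives:
 * dinode xattrs use the tail i_xattr_inline_size bytes of the inode
 * block, an unindexed xattr block uses everything after xb_header, and a
 * bucket spans OCFS2_XATTR_BUCKET_SIZE bytes across its buffer heads.
 */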
2302
Tao Mace9c5a52009-08-18 11:43:59 +08002303/*
 2304 * When removing an xattr whose value is stored outside and refcounted, we
 2305 * may have to split the refcount tree, so we need the allocators.
2306 */
2307static int ocfs2_lock_xattr_remove_allocators(struct inode *inode,
2308 struct ocfs2_xattr_value_root *xv,
2309 struct ocfs2_caching_info *ref_ci,
2310 struct buffer_head *ref_root_bh,
2311 struct ocfs2_alloc_context **meta_ac,
2312 int *ref_credits)
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002313{
Tao Mace9c5a52009-08-18 11:43:59 +08002314 int ret, meta_add = 0;
2315 u32 p_cluster, num_clusters;
2316 unsigned int ext_flags;
Tao Ma78f30c32008-11-12 08:27:00 +08002317
Tao Mace9c5a52009-08-18 11:43:59 +08002318 *ref_credits = 0;
2319 ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster,
2320 &num_clusters,
2321 &xv->xr_list,
2322 &ext_flags);
2323 if (ret) {
Tao Ma85db90e2008-11-12 08:27:01 +08002324 mlog_errno(ret);
2325 goto out;
2326 }
2327
Tao Mace9c5a52009-08-18 11:43:59 +08002328 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
2329 goto out;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002330
Tao Mace9c5a52009-08-18 11:43:59 +08002331 ret = ocfs2_refcounted_xattr_delete_need(inode, ref_ci,
2332 ref_root_bh, xv,
2333 &meta_add, ref_credits);
2334 if (ret) {
2335 mlog_errno(ret);
2336 goto out;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002337 }
2338
Tao Mace9c5a52009-08-18 11:43:59 +08002339 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
2340 meta_add, meta_ac);
2341 if (ret)
2342 mlog_errno(ret);
2343
Tao Ma85db90e2008-11-12 08:27:01 +08002344out:
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002345 return ret;
2346}
2347
Tao Mace9c5a52009-08-18 11:43:59 +08002348static int ocfs2_remove_value_outside(struct inode *inode,
2349 struct ocfs2_xattr_value_buf *vb,
2350 struct ocfs2_xattr_header *header,
2351 struct ocfs2_caching_info *ref_ci,
2352 struct buffer_head *ref_root_bh)
2353{
2354 int ret = 0, i, ref_credits;
2355 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2356 struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, };
2357 void *val;
2358
2359 ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
2360
2361 for (i = 0; i < le16_to_cpu(header->xh_count); i++) {
2362 struct ocfs2_xattr_entry *entry = &header->xh_entries[i];
2363
2364 if (ocfs2_xattr_is_local(entry))
2365 continue;
2366
2367 val = (void *)header +
2368 le16_to_cpu(entry->xe_name_offset);
2369 vb->vb_xv = (struct ocfs2_xattr_value_root *)
2370 (val + OCFS2_XATTR_SIZE(entry->xe_name_len));
2371
2372 ret = ocfs2_lock_xattr_remove_allocators(inode, vb->vb_xv,
2373 ref_ci, ref_root_bh,
2374 &ctxt.meta_ac,
2375 &ref_credits);
2376
2377 ctxt.handle = ocfs2_start_trans(osb, ref_credits +
2378 ocfs2_remove_extent_credits(osb->sb));
2379 if (IS_ERR(ctxt.handle)) {
2380 ret = PTR_ERR(ctxt.handle);
2381 mlog_errno(ret);
2382 break;
2383 }
2384
2385 ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
Tao Mace9c5a52009-08-18 11:43:59 +08002386
2387 ocfs2_commit_trans(osb, ctxt.handle);
2388 if (ctxt.meta_ac) {
2389 ocfs2_free_alloc_context(ctxt.meta_ac);
2390 ctxt.meta_ac = NULL;
2391 }
Wengang Wangb8a0ae52011-10-12 15:22:15 +08002392
2393 if (ret < 0) {
2394 mlog_errno(ret);
2395 break;
2396 }
2397
Tao Mace9c5a52009-08-18 11:43:59 +08002398 }
2399
2400 if (ctxt.meta_ac)
2401 ocfs2_free_alloc_context(ctxt.meta_ac);
2402 ocfs2_schedule_truncate_log_flush(osb, 1);
2403 ocfs2_run_deallocs(osb, &ctxt.dealloc);
2404 return ret;
2405}
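/*
 * Note that each external value above is truncated in its own
 * transaction, sized for removing the extents plus whatever refcount tree
 * work ocfs2_lock_xattr_remove_allocators() predicted, so a failure part
 * way through leaves the earlier values already freed.
 */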
2406
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002407static int ocfs2_xattr_ibody_remove(struct inode *inode,
Tao Mace9c5a52009-08-18 11:43:59 +08002408 struct buffer_head *di_bh,
2409 struct ocfs2_caching_info *ref_ci,
2410 struct buffer_head *ref_root_bh)
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002411{
2412
2413 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2414 struct ocfs2_xattr_header *header;
2415 int ret;
Joel Becker43119012008-12-09 16:24:43 -08002416 struct ocfs2_xattr_value_buf vb = {
2417 .vb_bh = di_bh,
2418 .vb_access = ocfs2_journal_access_di,
2419 };
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002420
2421 header = (struct ocfs2_xattr_header *)
2422 ((void *)di + inode->i_sb->s_blocksize -
2423 le16_to_cpu(di->i_xattr_inline_size));
2424
Tao Mace9c5a52009-08-18 11:43:59 +08002425 ret = ocfs2_remove_value_outside(inode, &vb, header,
2426 ref_ci, ref_root_bh);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002427
2428 return ret;
2429}
2430
Tao Mace9c5a52009-08-18 11:43:59 +08002431struct ocfs2_rm_xattr_bucket_para {
2432 struct ocfs2_caching_info *ref_ci;
2433 struct buffer_head *ref_root_bh;
2434};
2435
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002436static int ocfs2_xattr_block_remove(struct inode *inode,
Tao Mace9c5a52009-08-18 11:43:59 +08002437 struct buffer_head *blk_bh,
2438 struct ocfs2_caching_info *ref_ci,
2439 struct buffer_head *ref_root_bh)
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002440{
2441 struct ocfs2_xattr_block *xb;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002442 int ret = 0;
Joel Becker43119012008-12-09 16:24:43 -08002443 struct ocfs2_xattr_value_buf vb = {
2444 .vb_bh = blk_bh,
2445 .vb_access = ocfs2_journal_access_xb,
2446 };
Tao Mace9c5a52009-08-18 11:43:59 +08002447 struct ocfs2_rm_xattr_bucket_para args = {
2448 .ref_ci = ref_ci,
2449 .ref_root_bh = ref_root_bh,
2450 };
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002451
2452 xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
Tao Maa3944252008-08-18 17:38:54 +08002453 if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
2454 struct ocfs2_xattr_header *header = &(xb->xb_attrs.xb_header);
Tao Mace9c5a52009-08-18 11:43:59 +08002455 ret = ocfs2_remove_value_outside(inode, &vb, header,
2456 ref_ci, ref_root_bh);
Tao Maa3944252008-08-18 17:38:54 +08002457 } else
Tao Ma47bca492009-08-18 11:43:42 +08002458 ret = ocfs2_iterate_xattr_index_block(inode,
2459 blk_bh,
2460 ocfs2_rm_xattr_cluster,
Tao Mace9c5a52009-08-18 11:43:59 +08002461 &args);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002462
2463 return ret;
2464}
2465
Tao Ma08413892008-08-29 09:00:19 +08002466static int ocfs2_xattr_free_block(struct inode *inode,
Tao Mace9c5a52009-08-18 11:43:59 +08002467 u64 block,
2468 struct ocfs2_caching_info *ref_ci,
2469 struct buffer_head *ref_root_bh)
Tao Ma08413892008-08-29 09:00:19 +08002470{
2471 struct inode *xb_alloc_inode;
2472 struct buffer_head *xb_alloc_bh = NULL;
2473 struct buffer_head *blk_bh = NULL;
2474 struct ocfs2_xattr_block *xb;
2475 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2476 handle_t *handle;
2477 int ret = 0;
2478 u64 blk, bg_blkno;
2479 u16 bit;
2480
Joel Becker4ae1d692008-11-13 14:49:18 -08002481 ret = ocfs2_read_xattr_block(inode, block, &blk_bh);
Tao Ma08413892008-08-29 09:00:19 +08002482 if (ret < 0) {
2483 mlog_errno(ret);
2484 goto out;
2485 }
2486
Tao Mace9c5a52009-08-18 11:43:59 +08002487 ret = ocfs2_xattr_block_remove(inode, blk_bh, ref_ci, ref_root_bh);
Tao Ma08413892008-08-29 09:00:19 +08002488 if (ret < 0) {
2489 mlog_errno(ret);
2490 goto out;
2491 }
2492
Joel Becker4ae1d692008-11-13 14:49:18 -08002493 xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
Tao Ma08413892008-08-29 09:00:19 +08002494 blk = le64_to_cpu(xb->xb_blkno);
2495 bit = le16_to_cpu(xb->xb_suballoc_bit);
Tao Ma74380c42010-03-22 14:20:18 +08002496 if (xb->xb_suballoc_loc)
2497 bg_blkno = le64_to_cpu(xb->xb_suballoc_loc);
2498 else
2499 bg_blkno = ocfs2_which_suballoc_group(blk, bit);
Tao Ma08413892008-08-29 09:00:19 +08002500
2501 xb_alloc_inode = ocfs2_get_system_file_inode(osb,
2502 EXTENT_ALLOC_SYSTEM_INODE,
2503 le16_to_cpu(xb->xb_suballoc_slot));
2504 if (!xb_alloc_inode) {
2505 ret = -ENOMEM;
2506 mlog_errno(ret);
2507 goto out;
2508 }
2509 mutex_lock(&xb_alloc_inode->i_mutex);
2510
2511 ret = ocfs2_inode_lock(xb_alloc_inode, &xb_alloc_bh, 1);
2512 if (ret < 0) {
2513 mlog_errno(ret);
2514 goto out_mutex;
2515 }
2516
2517 handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
2518 if (IS_ERR(handle)) {
2519 ret = PTR_ERR(handle);
2520 mlog_errno(ret);
2521 goto out_unlock;
2522 }
2523
2524 ret = ocfs2_free_suballoc_bits(handle, xb_alloc_inode, xb_alloc_bh,
2525 bit, bg_blkno, 1);
2526 if (ret < 0)
2527 mlog_errno(ret);
2528
2529 ocfs2_commit_trans(osb, handle);
2530out_unlock:
2531 ocfs2_inode_unlock(xb_alloc_inode, 1);
2532 brelse(xb_alloc_bh);
2533out_mutex:
2534 mutex_unlock(&xb_alloc_inode->i_mutex);
2535 iput(xb_alloc_inode);
2536out:
2537 brelse(blk_bh);
2538 return ret;
2539}
2540
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002541/*
2542 * ocfs2_xattr_remove()
2543 *
2544 * Free extended attribute resources associated with this inode.
2545 */
2546int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh)
2547{
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002548 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2549 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
Tao Mace9c5a52009-08-18 11:43:59 +08002550 struct ocfs2_refcount_tree *ref_tree = NULL;
2551 struct buffer_head *ref_root_bh = NULL;
2552 struct ocfs2_caching_info *ref_ci = NULL;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002553 handle_t *handle;
2554 int ret;
2555
Tiger Yang8154da32008-08-18 17:11:46 +08002556 if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
2557 return 0;
2558
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002559 if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
2560 return 0;
2561
Tao Mace9c5a52009-08-18 11:43:59 +08002562 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {
2563 ret = ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb),
2564 le64_to_cpu(di->i_refcount_loc),
2565 1, &ref_tree, &ref_root_bh);
2566 if (ret) {
2567 mlog_errno(ret);
2568 goto out;
2569 }
2570 ref_ci = &ref_tree->rf_ci;
2571
2572 }
2573
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002574 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
Tao Mace9c5a52009-08-18 11:43:59 +08002575 ret = ocfs2_xattr_ibody_remove(inode, di_bh,
2576 ref_ci, ref_root_bh);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002577 if (ret < 0) {
2578 mlog_errno(ret);
2579 goto out;
2580 }
2581 }
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002582
Tao Ma08413892008-08-29 09:00:19 +08002583 if (di->i_xattr_loc) {
2584 ret = ocfs2_xattr_free_block(inode,
Tao Mace9c5a52009-08-18 11:43:59 +08002585 le64_to_cpu(di->i_xattr_loc),
2586 ref_ci, ref_root_bh);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002587 if (ret < 0) {
2588 mlog_errno(ret);
2589 goto out;
2590 }
2591 }
2592
2593 handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)),
2594 OCFS2_INODE_UPDATE_CREDITS);
2595 if (IS_ERR(handle)) {
2596 ret = PTR_ERR(handle);
2597 mlog_errno(ret);
2598 goto out;
2599 }
Joel Becker0cf2f762009-02-12 16:41:25 -08002600 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
Joel Becker84008972008-12-09 16:11:49 -08002601 OCFS2_JOURNAL_ACCESS_WRITE);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002602 if (ret) {
2603 mlog_errno(ret);
2604 goto out_commit;
2605 }
2606
Tao Ma08413892008-08-29 09:00:19 +08002607 di->i_xattr_loc = 0;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002608
2609 spin_lock(&oi->ip_lock);
2610 oi->ip_dyn_features &= ~(OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL);
2611 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
2612 spin_unlock(&oi->ip_lock);
Darrick J. Wong6fdb7022014-04-03 14:47:08 -07002613 ocfs2_update_inode_fsync_trans(handle, inode, 0);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002614
Joel Beckerec20cec2010-03-19 14:13:52 -07002615 ocfs2_journal_dirty(handle, di_bh);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002616out_commit:
2617 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
2618out:
Tao Mace9c5a52009-08-18 11:43:59 +08002619 if (ref_tree)
2620 ocfs2_unlock_refcount_tree(OCFS2_SB(inode->i_sb), ref_tree, 1);
2621 brelse(ref_root_bh);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002622 return ret;
2623}
2624
2625static int ocfs2_xattr_has_space_inline(struct inode *inode,
2626 struct ocfs2_dinode *di)
2627{
2628 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2629 unsigned int xattrsize = OCFS2_SB(inode->i_sb)->s_xattr_inline_size;
2630 int free;
2631
2632 if (xattrsize < OCFS2_MIN_XATTR_INLINE_SIZE)
2633 return 0;
2634
2635 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
2636 struct ocfs2_inline_data *idata = &di->id2.i_data;
2637 free = le16_to_cpu(idata->id_count) - le64_to_cpu(di->i_size);
2638 } else if (ocfs2_inode_is_fast_symlink(inode)) {
2639 free = ocfs2_fast_symlink_chars(inode->i_sb) -
2640 le64_to_cpu(di->i_size);
2641 } else {
2642 struct ocfs2_extent_list *el = &di->id2.i_list;
2643 free = (le16_to_cpu(el->l_count) -
2644 le16_to_cpu(el->l_next_free_rec)) *
2645 sizeof(struct ocfs2_extent_rec);
2646 }
2647 if (free >= xattrsize)
2648 return 1;
2649
2650 return 0;
2651}
2652
2653/*
2654 * ocfs2_xattr_ibody_find()
2655 *
2656 * Find extended attribute in inode block and
2657 * fill search info into struct ocfs2_xattr_search.
2658 */
2659static int ocfs2_xattr_ibody_find(struct inode *inode,
2660 int name_index,
2661 const char *name,
2662 struct ocfs2_xattr_search *xs)
2663{
2664 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2665 struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
2666 int ret;
2667 int has_space = 0;
2668
2669 if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
2670 return 0;
2671
2672 if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
2673 down_read(&oi->ip_alloc_sem);
2674 has_space = ocfs2_xattr_has_space_inline(inode, di);
2675 up_read(&oi->ip_alloc_sem);
2676 if (!has_space)
2677 return 0;
2678 }
2679
2680 xs->xattr_bh = xs->inode_bh;
2681 xs->end = (void *)di + inode->i_sb->s_blocksize;
2682 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)
2683 xs->header = (struct ocfs2_xattr_header *)
2684 (xs->end - le16_to_cpu(di->i_xattr_inline_size));
2685 else
2686 xs->header = (struct ocfs2_xattr_header *)
2687 (xs->end - OCFS2_SB(inode->i_sb)->s_xattr_inline_size);
2688 xs->base = (void *)xs->header;
2689 xs->here = xs->header->xh_entries;
2690
2691 /* Find the named attribute. */
2692 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
2693 ret = ocfs2_xattr_find_entry(name_index, name, xs);
2694 if (ret && ret != -ENODATA)
2695 return ret;
2696 xs->not_found = ret;
2697 }
2698
2699 return 0;
2700}
2701
Joel Becker139ffac2009-08-19 11:09:17 -07002702static int ocfs2_xattr_ibody_init(struct inode *inode,
2703 struct buffer_head *di_bh,
2704 struct ocfs2_xattr_set_ctxt *ctxt)
2705{
2706 int ret;
2707 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2708 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2709 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2710 unsigned int xattrsize = osb->s_xattr_inline_size;
2711
2712 if (!ocfs2_xattr_has_space_inline(inode, di)) {
2713 ret = -ENOSPC;
2714 goto out;
2715 }
2716
2717 ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), di_bh,
2718 OCFS2_JOURNAL_ACCESS_WRITE);
2719 if (ret) {
2720 mlog_errno(ret);
2721 goto out;
2722 }
2723
2724 /*
2725 * Adjust extent record count or inline data size
2726 * to reserve space for extended attribute.
2727 */
2728 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
2729 struct ocfs2_inline_data *idata = &di->id2.i_data;
2730 le16_add_cpu(&idata->id_count, -xattrsize);
2731 } else if (!(ocfs2_inode_is_fast_symlink(inode))) {
2732 struct ocfs2_extent_list *el = &di->id2.i_list;
2733 le16_add_cpu(&el->l_count, -(xattrsize /
2734 sizeof(struct ocfs2_extent_rec)));
2735 }
2736 di->i_xattr_inline_size = cpu_to_le16(xattrsize);
2737
2738 spin_lock(&oi->ip_lock);
2739 oi->ip_dyn_features |= OCFS2_INLINE_XATTR_FL|OCFS2_HAS_XATTR_FL;
2740 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
2741 spin_unlock(&oi->ip_lock);
2742
Joel Beckerec20cec2010-03-19 14:13:52 -07002743 ocfs2_journal_dirty(ctxt->handle, di_bh);
Joel Becker139ffac2009-08-19 11:09:17 -07002744
2745out:
2746 return ret;
2747}
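/*
 * Rough example (sizes depend on the filesystem, so treat the numbers as
 * illustrative): reserving a 256-byte inline xattr area on an extent-list
 * inode shrinks l_count by 256 / sizeof(struct ocfs2_extent_rec) records,
 * while an inline-data inode simply gives up the last 256 bytes of
 * id_count.
 */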
2748
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002749/*
2750 * ocfs2_xattr_ibody_set()
2751 *
 2752 * Set, replace or remove an extended attribute in the inode block.
2753 *
2754 */
2755static int ocfs2_xattr_ibody_set(struct inode *inode,
2756 struct ocfs2_xattr_info *xi,
Tao Ma78f30c32008-11-12 08:27:00 +08002757 struct ocfs2_xattr_search *xs,
2758 struct ocfs2_xattr_set_ctxt *ctxt)
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002759{
Joel Becker139ffac2009-08-19 11:09:17 -07002760 int ret;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002761 struct ocfs2_inode_info *oi = OCFS2_I(inode);
Joel Becker139ffac2009-08-19 11:09:17 -07002762 struct ocfs2_xa_loc loc;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002763
2764 if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
2765 return -ENOSPC;
2766
2767 down_write(&oi->ip_alloc_sem);
2768 if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
Joel Becker139ffac2009-08-19 11:09:17 -07002769 ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt);
2770 if (ret) {
2771 if (ret != -ENOSPC)
2772 mlog_errno(ret);
2773 goto out;
2774 }
2775 }
2776
2777 ocfs2_init_dinode_xa_loc(&loc, inode, xs->inode_bh,
2778 xs->not_found ? NULL : xs->here);
2779 ret = ocfs2_xa_set(&loc, xi, ctxt);
2780 if (ret) {
2781 if (ret != -ENOSPC)
2782 mlog_errno(ret);
2783 goto out;
2784 }
2785 xs->here = loc.xl_entry;
2786
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002787out:
2788 up_write(&oi->ip_alloc_sem);
2789
2790 return ret;
2791}
2792
2793/*
2794 * ocfs2_xattr_block_find()
2795 *
 2796 * Find the named extended attribute in the external block and
 2797 * fill the search info into struct ocfs2_xattr_search.
2798 */
2799static int ocfs2_xattr_block_find(struct inode *inode,
2800 int name_index,
2801 const char *name,
2802 struct ocfs2_xattr_search *xs)
2803{
2804 struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
2805 struct buffer_head *blk_bh = NULL;
Tao Ma589dc262008-08-18 17:38:51 +08002806 struct ocfs2_xattr_block *xb;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002807 int ret = 0;
2808
2809 if (!di->i_xattr_loc)
2810 return ret;
2811
Joel Becker4ae1d692008-11-13 14:49:18 -08002812 ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
2813 &blk_bh);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002814 if (ret < 0) {
2815 mlog_errno(ret);
2816 return ret;
2817 }
Joel Beckerf6087fb2008-10-20 18:20:43 -07002818
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002819 xs->xattr_bh = blk_bh;
Joel Becker4ae1d692008-11-13 14:49:18 -08002820 xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002821
Tao Ma589dc262008-08-18 17:38:51 +08002822 if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
2823 xs->header = &xb->xb_attrs.xb_header;
2824 xs->base = (void *)xs->header;
2825 xs->end = (void *)(blk_bh->b_data) + blk_bh->b_size;
2826 xs->here = xs->header->xh_entries;
2827
2828 ret = ocfs2_xattr_find_entry(name_index, name, xs);
2829 } else
2830 ret = ocfs2_xattr_index_block_find(inode, blk_bh,
2831 name_index,
2832 name, xs);
2833
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002834 if (ret && ret != -ENODATA) {
2835 xs->xattr_bh = NULL;
2836 goto cleanup;
2837 }
2838 xs->not_found = ret;
2839 return 0;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002840cleanup:
2841 brelse(blk_bh);
2842
2843 return ret;
2844}
2845
Joel Beckerd3981542009-08-19 02:13:50 -07002846static int ocfs2_create_xattr_block(struct inode *inode,
Tao Ma5aea1f02009-08-18 11:43:24 +08002847 struct buffer_head *inode_bh,
Joel Beckerd3981542009-08-19 02:13:50 -07002848 struct ocfs2_xattr_set_ctxt *ctxt,
2849 int indexed,
2850 struct buffer_head **ret_bh)
Tao Ma5aea1f02009-08-18 11:43:24 +08002851{
2852 int ret;
2853 u16 suballoc_bit_start;
2854 u32 num_got;
Joel Becker2b6cb572010-03-26 10:09:15 +08002855 u64 suballoc_loc, first_blkno;
Tao Ma5aea1f02009-08-18 11:43:24 +08002856 struct ocfs2_dinode *di = (struct ocfs2_dinode *)inode_bh->b_data;
Tao Ma5aea1f02009-08-18 11:43:24 +08002857 struct buffer_head *new_bh = NULL;
2858 struct ocfs2_xattr_block *xblk;
2859
Joel Beckerd3981542009-08-19 02:13:50 -07002860 ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
2861 inode_bh, OCFS2_JOURNAL_ACCESS_CREATE);
Tao Ma5aea1f02009-08-18 11:43:24 +08002862 if (ret < 0) {
2863 mlog_errno(ret);
2864 goto end;
2865 }
2866
Joel Becker1ed9b772010-05-06 13:59:06 +08002867 ret = ocfs2_claim_metadata(ctxt->handle, ctxt->meta_ac, 1,
Joel Becker2b6cb572010-03-26 10:09:15 +08002868 &suballoc_loc, &suballoc_bit_start,
2869 &num_got, &first_blkno);
Tao Ma5aea1f02009-08-18 11:43:24 +08002870 if (ret < 0) {
2871 mlog_errno(ret);
2872 goto end;
2873 }
2874
2875 new_bh = sb_getblk(inode->i_sb, first_blkno);
Rui Xiang58796202013-11-12 15:06:55 -08002876 if (!new_bh) {
2877 ret = -ENOMEM;
2878 mlog_errno(ret);
2879 goto end;
2880 }
2881
Tao Ma5aea1f02009-08-18 11:43:24 +08002882 ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
2883
Joel Beckerd3981542009-08-19 02:13:50 -07002884 ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode),
Tao Ma5aea1f02009-08-18 11:43:24 +08002885 new_bh,
2886 OCFS2_JOURNAL_ACCESS_CREATE);
2887 if (ret < 0) {
2888 mlog_errno(ret);
2889 goto end;
2890 }
2891
2892 /* Initialize ocfs2_xattr_block */
2893 xblk = (struct ocfs2_xattr_block *)new_bh->b_data;
2894 memset(xblk, 0, inode->i_sb->s_blocksize);
2895 strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE);
Joel Beckerd3981542009-08-19 02:13:50 -07002896 xblk->xb_suballoc_slot = cpu_to_le16(ctxt->meta_ac->ac_alloc_slot);
Joel Becker2b6cb572010-03-26 10:09:15 +08002897 xblk->xb_suballoc_loc = cpu_to_le64(suballoc_loc);
Tao Ma5aea1f02009-08-18 11:43:24 +08002898 xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start);
Joel Becker1ed9b772010-05-06 13:59:06 +08002899 xblk->xb_fs_generation =
2900 cpu_to_le32(OCFS2_SB(inode->i_sb)->fs_generation);
Tao Ma5aea1f02009-08-18 11:43:24 +08002901 xblk->xb_blkno = cpu_to_le64(first_blkno);
Tao Maa7fe7a32009-08-18 11:43:52 +08002902 if (indexed) {
2903 struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root;
2904 xr->xt_clusters = cpu_to_le32(1);
2905 xr->xt_last_eb_blk = 0;
2906 xr->xt_list.l_tree_depth = 0;
2907 xr->xt_list.l_count = cpu_to_le16(
2908 ocfs2_xattr_recs_per_xb(inode->i_sb));
2909 xr->xt_list.l_next_free_rec = cpu_to_le16(1);
2910 xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED);
2911 }
Joel Beckerd3981542009-08-19 02:13:50 -07002912 ocfs2_journal_dirty(ctxt->handle, new_bh);
Tao Maa7fe7a32009-08-18 11:43:52 +08002913
Joel Beckerd3981542009-08-19 02:13:50 -07002914 /* Add it to the inode */
Tao Ma5aea1f02009-08-18 11:43:24 +08002915 di->i_xattr_loc = cpu_to_le64(first_blkno);
Joel Beckerd3981542009-08-19 02:13:50 -07002916
2917 spin_lock(&OCFS2_I(inode)->ip_lock);
2918 OCFS2_I(inode)->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
2919 di->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features);
2920 spin_unlock(&OCFS2_I(inode)->ip_lock);
2921
2922 ocfs2_journal_dirty(ctxt->handle, inode_bh);
Tao Ma5aea1f02009-08-18 11:43:24 +08002923
2924 *ret_bh = new_bh;
2925 new_bh = NULL;
2926
2927end:
2928 brelse(new_bh);
2929 return ret;
2930}
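/*
 * The new block starts life with just the OCFS2_XATTR_BLOCK_SIGNATURE,
 * suballocator bookkeeping and generation filled in.  When @indexed is
 * set it is pre-formatted as a one-cluster xattr tree root instead of a
 * flat header, and the inode's i_xattr_loc plus OCFS2_HAS_XATTR_FL are
 * updated in the same transaction.
 */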
2931
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002932/*
2933 * ocfs2_xattr_block_set()
2934 *
 2935 * Set, replace or remove an extended attribute in the external xattr block.
2936 *
2937 */
2938static int ocfs2_xattr_block_set(struct inode *inode,
2939 struct ocfs2_xattr_info *xi,
Tao Ma78f30c32008-11-12 08:27:00 +08002940 struct ocfs2_xattr_search *xs,
2941 struct ocfs2_xattr_set_ctxt *ctxt)
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002942{
2943 struct buffer_head *new_bh = NULL;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002944 struct ocfs2_xattr_block *xblk = NULL;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002945 int ret;
Joel Beckerd3981542009-08-19 02:13:50 -07002946 struct ocfs2_xa_loc loc;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002947
2948 if (!xs->xattr_bh) {
Joel Beckerd3981542009-08-19 02:13:50 -07002949 ret = ocfs2_create_xattr_block(inode, xs->inode_bh, ctxt,
2950 0, &new_bh);
Tao Ma5aea1f02009-08-18 11:43:24 +08002951 if (ret) {
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002952 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +08002953 goto end;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002954 }
2955
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002956 xs->xattr_bh = new_bh;
Tao Ma5aea1f02009-08-18 11:43:24 +08002957 xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002958 xs->header = &xblk->xb_attrs.xb_header;
2959 xs->base = (void *)xs->header;
2960 xs->end = (void *)xblk + inode->i_sb->s_blocksize;
2961 xs->here = xs->header->xh_entries;
Tao Ma01225592008-08-18 17:38:53 +08002962 } else
2963 xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
2964
2965 if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) {
Joel Beckerd3981542009-08-19 02:13:50 -07002966 ocfs2_init_xattr_block_xa_loc(&loc, inode, xs->xattr_bh,
2967 xs->not_found ? NULL : xs->here);
Tao Ma01225592008-08-18 17:38:53 +08002968
Joel Beckerd3981542009-08-19 02:13:50 -07002969 ret = ocfs2_xa_set(&loc, xi, ctxt);
2970 if (!ret)
2971 xs->here = loc.xl_entry;
Tao Ma5f5261a2010-05-13 22:49:05 +08002972 else if ((ret != -ENOSPC) || ctxt->set_abort)
Tao Ma01225592008-08-18 17:38:53 +08002973 goto end;
Joel Beckerd3981542009-08-19 02:13:50 -07002974 else {
2975 ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
2976 if (ret)
2977 goto end;
2978 }
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002979 }
2980
Joel Beckerd3981542009-08-19 02:13:50 -07002981 if (le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)
2982 ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt);
Tao Ma01225592008-08-18 17:38:53 +08002983
2984end:
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002985 return ret;
2986}
2987
Tao Ma78f30c32008-11-12 08:27:00 +08002988/* Check whether the new xattr can be inserted into the inode. */
2989static int ocfs2_xattr_can_be_in_inode(struct inode *inode,
2990 struct ocfs2_xattr_info *xi,
2991 struct ocfs2_xattr_search *xs)
2992{
Tao Ma78f30c32008-11-12 08:27:00 +08002993 struct ocfs2_xattr_entry *last;
2994 int free, i;
2995 size_t min_offs = xs->end - xs->base;
2996
2997 if (!xs->header)
2998 return 0;
2999
3000 last = xs->header->xh_entries;
3001
3002 for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
3003 size_t offs = le16_to_cpu(last->xe_name_offset);
3004 if (offs < min_offs)
3005 min_offs = offs;
3006 last += 1;
3007 }
3008
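	/*
	 * min_offs is the lowest name/value offset found above; the usable
	 * free space is the gap between the end of the entry array and
	 * that offset, less the mandatory header gap.
	 */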
Tiger Yang4442f512009-02-20 11:11:50 +08003009 free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP;
Tao Ma78f30c32008-11-12 08:27:00 +08003010 if (free < 0)
3011 return 0;
3012
3013 BUG_ON(!xs->not_found);
3014
Joel Becker199799a2009-08-14 19:04:15 -07003015 if (free >= (sizeof(struct ocfs2_xattr_entry) + namevalue_size_xi(xi)))
Tao Ma78f30c32008-11-12 08:27:00 +08003016 return 1;
3017
3018 return 0;
3019}
3020
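/*
 * Estimate what this xattr set will need: clusters for the value data,
 * metadata blocks for possible extent tree growth, and journal credits.
 * These are worst-case guesses that the caller uses to reserve resources
 * before starting the transaction.
 */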
3021static int ocfs2_calc_xattr_set_need(struct inode *inode,
3022 struct ocfs2_dinode *di,
3023 struct ocfs2_xattr_info *xi,
3024 struct ocfs2_xattr_search *xis,
3025 struct ocfs2_xattr_search *xbs,
3026 int *clusters_need,
Tao Ma85db90e2008-11-12 08:27:01 +08003027 int *meta_need,
3028 int *credits_need)
Tao Ma78f30c32008-11-12 08:27:00 +08003029{
3030 int ret = 0, old_in_xb = 0;
Tao Ma85db90e2008-11-12 08:27:01 +08003031 int clusters_add = 0, meta_add = 0, credits = 0;
Tao Ma78f30c32008-11-12 08:27:00 +08003032 struct buffer_head *bh = NULL;
3033 struct ocfs2_xattr_block *xb = NULL;
3034 struct ocfs2_xattr_entry *xe = NULL;
3035 struct ocfs2_xattr_value_root *xv = NULL;
3036 char *base = NULL;
3037 int name_offset, name_len = 0;
3038 u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
Joel Becker6b240ff2009-08-14 18:02:52 -07003039 xi->xi_value_len);
Tao Ma78f30c32008-11-12 08:27:00 +08003040 u64 value_size;
3041
Tao Ma71d548a2008-12-05 06:20:54 +08003042 /*
 3043	 * Calculate the clusters we need to write.
 3044	 * Whether we replace an old value or add a new one,
 3045	 * we need these clusters for writing.
3046 */
Joel Becker6b240ff2009-08-14 18:02:52 -07003047 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
Tao Ma71d548a2008-12-05 06:20:54 +08003048 credits += new_clusters *
3049 ocfs2_clusters_to_blocks(inode->i_sb, 1);
3050
Tao Ma78f30c32008-11-12 08:27:00 +08003051 if (xis->not_found && xbs->not_found) {
Tao Ma85db90e2008-11-12 08:27:01 +08003052 credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
3053
Joel Becker6b240ff2009-08-14 18:02:52 -07003054 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
Tao Ma78f30c32008-11-12 08:27:00 +08003055 clusters_add += new_clusters;
Tao Ma85db90e2008-11-12 08:27:01 +08003056 credits += ocfs2_calc_extend_credits(inode->i_sb,
Goldwyn Rodrigues06f9da62013-11-12 15:06:52 -08003057 &def_xv.xv.xr_list);
Tao Ma85db90e2008-11-12 08:27:01 +08003058 }
Tao Ma78f30c32008-11-12 08:27:00 +08003059
3060 goto meta_guess;
3061 }
3062
3063 if (!xis->not_found) {
3064 xe = xis->here;
3065 name_offset = le16_to_cpu(xe->xe_name_offset);
3066 name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
3067 base = xis->base;
Tao Ma85db90e2008-11-12 08:27:01 +08003068 credits += OCFS2_INODE_UPDATE_CREDITS;
Tao Ma78f30c32008-11-12 08:27:00 +08003069 } else {
Joel Becker970e4932008-11-13 14:49:19 -08003070 int i, block_off = 0;
Tao Ma78f30c32008-11-12 08:27:00 +08003071 xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
3072 xe = xbs->here;
3073 name_offset = le16_to_cpu(xe->xe_name_offset);
3074 name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
3075 i = xbs->here - xbs->header->xh_entries;
3076 old_in_xb = 1;
3077
3078 if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
Tao Mafd68a892009-08-18 11:43:21 +08003079 ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
Tao Ma78f30c32008-11-12 08:27:00 +08003080 bucket_xh(xbs->bucket),
3081 i, &block_off,
3082 &name_offset);
3083 base = bucket_block(xbs->bucket, block_off);
Tao Ma85db90e2008-11-12 08:27:01 +08003084 credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
3085 } else {
Tao Ma78f30c32008-11-12 08:27:00 +08003086 base = xbs->base;
Tao Ma85db90e2008-11-12 08:27:01 +08003087 credits += OCFS2_XATTR_BLOCK_UPDATE_CREDITS;
3088 }
3089 }
3090
3091 /*
 3092	 * Deleting an xattr doesn't need metadata or cluster allocation,
 3093	 * so just calculate the credits and return.
3094 *
3095 * The credits for removing the value tree will be extended
3096 * by ocfs2_remove_extent itself.
3097 */
Joel Becker6b240ff2009-08-14 18:02:52 -07003098 if (!xi->xi_value) {
Tao Ma85db90e2008-11-12 08:27:01 +08003099 if (!ocfs2_xattr_is_local(xe))
Jan Karaa90714c2008-10-09 19:38:40 +02003100 credits += ocfs2_remove_extent_credits(inode->i_sb);
Tao Ma85db90e2008-11-12 08:27:01 +08003101
3102 goto out;
Tao Ma78f30c32008-11-12 08:27:00 +08003103 }
3104
3105 /* do cluster allocation guess first. */
3106 value_size = le64_to_cpu(xe->xe_value_size);
3107
3108 if (old_in_xb) {
3109 /*
 3110	 * In xattr set, we always try to set the xe in the inode first,
 3111	 * so if it can be inserted into the inode successfully, the old
 3112	 * one will be removed from the xattr block, and this xattr
 3113	 * will be stored in the inode as a new in-inode xattr.
3114 */
3115 if (ocfs2_xattr_can_be_in_inode(inode, xi, xis)) {
3116 clusters_add += new_clusters;
Jan Karaa90714c2008-10-09 19:38:40 +02003117 credits += ocfs2_remove_extent_credits(inode->i_sb) +
Tao Ma85db90e2008-11-12 08:27:01 +08003118 OCFS2_INODE_UPDATE_CREDITS;
3119 if (!ocfs2_xattr_is_local(xe))
3120 credits += ocfs2_calc_extend_credits(
3121 inode->i_sb,
Goldwyn Rodrigues06f9da62013-11-12 15:06:52 -08003122 &def_xv.xv.xr_list);
Tao Ma78f30c32008-11-12 08:27:00 +08003123 goto out;
3124 }
3125 }
3126
Joel Becker6b240ff2009-08-14 18:02:52 -07003127 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
Tao Ma78f30c32008-11-12 08:27:00 +08003128 /* the new values will be stored outside. */
3129 u32 old_clusters = 0;
3130
3131 if (!ocfs2_xattr_is_local(xe)) {
3132 old_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
3133 value_size);
3134 xv = (struct ocfs2_xattr_value_root *)
3135 (base + name_offset + name_len);
Tao Ma97aff522008-11-19 16:48:41 +08003136 value_size = OCFS2_XATTR_ROOT_SIZE;
Tao Ma78f30c32008-11-12 08:27:00 +08003137 } else
3138 xv = &def_xv.xv;
3139
Tao Ma85db90e2008-11-12 08:27:01 +08003140 if (old_clusters >= new_clusters) {
Jan Karaa90714c2008-10-09 19:38:40 +02003141 credits += ocfs2_remove_extent_credits(inode->i_sb);
Tao Ma78f30c32008-11-12 08:27:00 +08003142 goto out;
Tao Ma85db90e2008-11-12 08:27:01 +08003143 } else {
Tao Ma78f30c32008-11-12 08:27:00 +08003144 meta_add += ocfs2_extend_meta_needed(&xv->xr_list);
3145 clusters_add += new_clusters - old_clusters;
Tao Ma85db90e2008-11-12 08:27:01 +08003146 credits += ocfs2_calc_extend_credits(inode->i_sb,
Goldwyn Rodrigues06f9da62013-11-12 15:06:52 -08003147 &xv->xr_list);
Tao Ma97aff522008-11-19 16:48:41 +08003148 if (value_size >= OCFS2_XATTR_ROOT_SIZE)
3149 goto out;
Tao Ma78f30c32008-11-12 08:27:00 +08003150 }
3151 } else {
3152 /*
 3153	 * Now the new value will be stored inline. So if the new
 3154	 * value is smaller than the size of the value root or the old
 3155	 * value, we don't need any allocation; otherwise we have
 3156	 * to guess the metadata allocation.
3157 */
Joel Becker6b240ff2009-08-14 18:02:52 -07003158 if ((ocfs2_xattr_is_local(xe) &&
3159 (value_size >= xi->xi_value_len)) ||
Tao Ma78f30c32008-11-12 08:27:00 +08003160 (!ocfs2_xattr_is_local(xe) &&
Joel Becker6b240ff2009-08-14 18:02:52 -07003161 OCFS2_XATTR_ROOT_SIZE >= xi->xi_value_len))
Tao Ma78f30c32008-11-12 08:27:00 +08003162 goto out;
3163 }
3164
3165meta_guess:
3166 /* calculate metadata allocation. */
3167 if (di->i_xattr_loc) {
3168 if (!xbs->xattr_bh) {
Joel Becker4ae1d692008-11-13 14:49:18 -08003169 ret = ocfs2_read_xattr_block(inode,
3170 le64_to_cpu(di->i_xattr_loc),
3171 &bh);
Tao Ma78f30c32008-11-12 08:27:00 +08003172 if (ret) {
3173 mlog_errno(ret);
3174 goto out;
3175 }
3176
3177 xb = (struct ocfs2_xattr_block *)bh->b_data;
3178 } else
3179 xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
3180
Tao Ma90cb5462008-12-05 06:20:56 +08003181 /*
 3182	 * If there is already an xattr tree, good, we can calculate
 3183	 * credits like any other b-tree. Otherwise we may have to
 3184	 * create a tree; the credit calculation is borrowed from
 3185	 * ocfs2_calc_extend_credits with root_el = NULL. And the
 3186	 * new tree will be cluster based, so no metadata is needed.
3187 */
Tao Ma78f30c32008-11-12 08:27:00 +08003188 if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
3189 struct ocfs2_extent_list *el =
3190 &xb->xb_attrs.xb_root.xt_list;
3191 meta_add += ocfs2_extend_meta_needed(el);
Tao Ma85db90e2008-11-12 08:27:01 +08003192 credits += ocfs2_calc_extend_credits(inode->i_sb,
Goldwyn Rodrigues06f9da62013-11-12 15:06:52 -08003193 el);
Tao Ma90cb5462008-12-05 06:20:56 +08003194 } else
3195 credits += OCFS2_SUBALLOC_ALLOC + 1;
Tao Ma78f30c32008-11-12 08:27:00 +08003196
3197 /*
 3198	 * This cluster will be used either for a new bucket or for
 3199	 * a new xattr block.
 3200	 * If the cluster size is the same as the bucket size, one
 3201	 * more is needed since we may need to extend the bucket
 3202	 * as well.
3203 */
3204 clusters_add += 1;
Tao Ma85db90e2008-11-12 08:27:01 +08003205 credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Tao Ma78f30c32008-11-12 08:27:00 +08003206 if (OCFS2_XATTR_BUCKET_SIZE ==
Tao Ma85db90e2008-11-12 08:27:01 +08003207 OCFS2_SB(inode->i_sb)->s_clustersize) {
3208 credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Tao Ma78f30c32008-11-12 08:27:00 +08003209 clusters_add += 1;
Tao Ma85db90e2008-11-12 08:27:01 +08003210 }
3211 } else {
Tao Ma85db90e2008-11-12 08:27:01 +08003212 credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
Tariq Saeed3ed2be72014-04-03 14:47:03 -07003213 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
3214 struct ocfs2_extent_list *el = &def_xv.xv.xr_list;
3215 meta_add += ocfs2_extend_meta_needed(el);
3216 credits += ocfs2_calc_extend_credits(inode->i_sb,
3217 el);
3218 } else {
3219 meta_add += 1;
3220 }
Tao Ma85db90e2008-11-12 08:27:01 +08003221 }
Tao Ma78f30c32008-11-12 08:27:00 +08003222out:
3223 if (clusters_need)
3224 *clusters_need = clusters_add;
3225 if (meta_need)
3226 *meta_need = meta_add;
Tao Ma85db90e2008-11-12 08:27:01 +08003227 if (credits_need)
3228 *credits_need = credits;
Tao Ma78f30c32008-11-12 08:27:00 +08003229 brelse(bh);
3230 return ret;
3231}
3232
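/*
 * Build the set context: ask ocfs2_calc_xattr_set_need() for the
 * worst-case needs (plus any extra metadata required for refcount
 * handling) and reserve the metadata blocks and clusters. The caller
 * starts the transaction with the credit count returned in *credits.
 */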
3233static int ocfs2_init_xattr_set_ctxt(struct inode *inode,
3234 struct ocfs2_dinode *di,
3235 struct ocfs2_xattr_info *xi,
3236 struct ocfs2_xattr_search *xis,
3237 struct ocfs2_xattr_search *xbs,
Tao Ma85db90e2008-11-12 08:27:01 +08003238 struct ocfs2_xattr_set_ctxt *ctxt,
Tao Ma492a8a32009-08-18 11:43:17 +08003239 int extra_meta,
Tao Ma85db90e2008-11-12 08:27:01 +08003240 int *credits)
Tao Ma78f30c32008-11-12 08:27:00 +08003241{
3242 int clusters_add, meta_add, ret;
3243 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3244
3245 memset(ctxt, 0, sizeof(struct ocfs2_xattr_set_ctxt));
3246
3247 ocfs2_init_dealloc_ctxt(&ctxt->dealloc);
3248
3249 ret = ocfs2_calc_xattr_set_need(inode, di, xi, xis, xbs,
Tao Ma85db90e2008-11-12 08:27:01 +08003250 &clusters_add, &meta_add, credits);
Tao Ma78f30c32008-11-12 08:27:00 +08003251 if (ret) {
3252 mlog_errno(ret);
3253 return ret;
3254 }
3255
Tao Ma492a8a32009-08-18 11:43:17 +08003256 meta_add += extra_meta;
Tao Ma402b4182011-02-23 22:01:17 +08003257 trace_ocfs2_init_xattr_set_ctxt(xi->xi_name, meta_add,
3258 clusters_add, *credits);
Tao Ma78f30c32008-11-12 08:27:00 +08003259
3260 if (meta_add) {
3261 ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add,
3262 &ctxt->meta_ac);
3263 if (ret) {
3264 mlog_errno(ret);
3265 goto out;
3266 }
3267 }
3268
3269 if (clusters_add) {
3270 ret = ocfs2_reserve_clusters(osb, clusters_add, &ctxt->data_ac);
3271 if (ret)
3272 mlog_errno(ret);
3273 }
3274out:
3275 if (ret) {
3276 if (ctxt->meta_ac) {
3277 ocfs2_free_alloc_context(ctxt->meta_ac);
3278 ctxt->meta_ac = NULL;
3279 }
3280
3281 /*
 3282		 * We cannot have an error and a non-NULL ctxt->data_ac.
3283 */
3284 }
3285
3286 return ret;
3287}
3288
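/*
 * Core of the set path: try the inode body first, fall back to the
 * external xattr block on -ENOSPC, and remove any stale copy of the
 * name from the other location, extending the transaction before each
 * extra step. On success the inode ctime is also updated.
 */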
Tao Ma85db90e2008-11-12 08:27:01 +08003289static int __ocfs2_xattr_set_handle(struct inode *inode,
3290 struct ocfs2_dinode *di,
3291 struct ocfs2_xattr_info *xi,
3292 struct ocfs2_xattr_search *xis,
3293 struct ocfs2_xattr_search *xbs,
3294 struct ocfs2_xattr_set_ctxt *ctxt)
3295{
Tao Ma9f868f12008-11-19 16:48:42 +08003296 int ret = 0, credits, old_found;
Tao Ma85db90e2008-11-12 08:27:01 +08003297
Joel Becker6b240ff2009-08-14 18:02:52 -07003298 if (!xi->xi_value) {
Tao Ma85db90e2008-11-12 08:27:01 +08003299 /* Remove existing extended attribute */
3300 if (!xis->not_found)
3301 ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
3302 else if (!xbs->not_found)
3303 ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
3304 } else {
3305 /* We always try to set extended attribute into inode first*/
 3306		/* We always try to set the extended attribute in the inode first */
3307 if (!ret && !xbs->not_found) {
3308 /*
 3309			 * If that succeeded and the extended attribute exists in
 3310			 * an external block, then we will remove it.
3311 */
Joel Becker6b240ff2009-08-14 18:02:52 -07003312 xi->xi_value = NULL;
3313 xi->xi_value_len = 0;
Tao Ma85db90e2008-11-12 08:27:01 +08003314
Tao Ma9f868f12008-11-19 16:48:42 +08003315 old_found = xis->not_found;
Tao Ma85db90e2008-11-12 08:27:01 +08003316 xis->not_found = -ENODATA;
3317 ret = ocfs2_calc_xattr_set_need(inode,
3318 di,
3319 xi,
3320 xis,
3321 xbs,
3322 NULL,
3323 NULL,
3324 &credits);
Tao Ma9f868f12008-11-19 16:48:42 +08003325 xis->not_found = old_found;
Tao Ma85db90e2008-11-12 08:27:01 +08003326 if (ret) {
3327 mlog_errno(ret);
3328 goto out;
3329 }
3330
Tao Mac901fb02010-04-26 14:34:57 +08003331 ret = ocfs2_extend_trans(ctxt->handle, credits);
Tao Ma85db90e2008-11-12 08:27:01 +08003332 if (ret) {
3333 mlog_errno(ret);
3334 goto out;
3335 }
3336 ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
Tao Ma5f5261a2010-05-13 22:49:05 +08003337 } else if ((ret == -ENOSPC) && !ctxt->set_abort) {
Tao Ma85db90e2008-11-12 08:27:01 +08003338 if (di->i_xattr_loc && !xbs->xattr_bh) {
3339 ret = ocfs2_xattr_block_find(inode,
Joel Becker6b240ff2009-08-14 18:02:52 -07003340 xi->xi_name_index,
3341 xi->xi_name, xbs);
Tao Ma85db90e2008-11-12 08:27:01 +08003342 if (ret)
3343 goto out;
3344
Tao Ma9f868f12008-11-19 16:48:42 +08003345 old_found = xis->not_found;
Tao Ma85db90e2008-11-12 08:27:01 +08003346 xis->not_found = -ENODATA;
3347 ret = ocfs2_calc_xattr_set_need(inode,
3348 di,
3349 xi,
3350 xis,
3351 xbs,
3352 NULL,
3353 NULL,
3354 &credits);
Tao Ma9f868f12008-11-19 16:48:42 +08003355 xis->not_found = old_found;
Tao Ma85db90e2008-11-12 08:27:01 +08003356 if (ret) {
3357 mlog_errno(ret);
3358 goto out;
3359 }
3360
Tao Mac901fb02010-04-26 14:34:57 +08003361 ret = ocfs2_extend_trans(ctxt->handle, credits);
Tao Ma85db90e2008-11-12 08:27:01 +08003362 if (ret) {
3363 mlog_errno(ret);
3364 goto out;
3365 }
3366 }
3367 /*
 3368			 * If there is no space in the inode, we will set the
 3369			 * extended attribute in an external block.
3370 */
3371 ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
3372 if (ret)
3373 goto out;
3374 if (!xis->not_found) {
3375 /*
 3376				 * If that succeeded and the extended attribute
 3377				 * exists in the inode, we will remove it.
3378 */
Joel Becker6b240ff2009-08-14 18:02:52 -07003379 xi->xi_value = NULL;
3380 xi->xi_value_len = 0;
Tao Ma85db90e2008-11-12 08:27:01 +08003381 xbs->not_found = -ENODATA;
3382 ret = ocfs2_calc_xattr_set_need(inode,
3383 di,
3384 xi,
3385 xis,
3386 xbs,
3387 NULL,
3388 NULL,
3389 &credits);
3390 if (ret) {
3391 mlog_errno(ret);
3392 goto out;
3393 }
3394
Tao Mac901fb02010-04-26 14:34:57 +08003395 ret = ocfs2_extend_trans(ctxt->handle, credits);
Tao Ma85db90e2008-11-12 08:27:01 +08003396 if (ret) {
3397 mlog_errno(ret);
3398 goto out;
3399 }
3400 ret = ocfs2_xattr_ibody_set(inode, xi,
3401 xis, ctxt);
3402 }
3403 }
3404 }
3405
Tao Ma4b3f6202008-12-05 06:20:55 +08003406 if (!ret) {
3407 /* Update inode ctime. */
Joel Becker0cf2f762009-02-12 16:41:25 -08003408 ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
Tao Ma89a907a2009-02-17 04:39:28 +08003409 xis->inode_bh,
3410 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma4b3f6202008-12-05 06:20:55 +08003411 if (ret) {
3412 mlog_errno(ret);
3413 goto out;
3414 }
3415
3416 inode->i_ctime = CURRENT_TIME;
3417 di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
3418 di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
3419 ocfs2_journal_dirty(ctxt->handle, xis->inode_bh);
3420 }
Tao Ma85db90e2008-11-12 08:27:01 +08003421out:
3422 return ret;
3423}
3424
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003425/*
Tiger Yang6c3faba2008-11-14 11:16:03 +08003426 * This function is only called during inode creation to
 3427 * initialize the security/acl xattrs of the new inode.
Tiger Yang008aafa2008-12-09 16:43:08 +08003428 * All transaction credits have been reserved in mknod.
Tiger Yang6c3faba2008-11-14 11:16:03 +08003429 */
3430int ocfs2_xattr_set_handle(handle_t *handle,
3431 struct inode *inode,
3432 struct buffer_head *di_bh,
3433 int name_index,
3434 const char *name,
3435 const void *value,
3436 size_t value_len,
3437 int flags,
3438 struct ocfs2_alloc_context *meta_ac,
3439 struct ocfs2_alloc_context *data_ac)
3440{
3441 struct ocfs2_dinode *di;
3442 int ret;
3443
3444 struct ocfs2_xattr_info xi = {
Joel Becker6b240ff2009-08-14 18:02:52 -07003445 .xi_name_index = name_index,
3446 .xi_name = name,
Joel Becker18853b92009-08-14 18:17:07 -07003447 .xi_name_len = strlen(name),
Joel Becker6b240ff2009-08-14 18:02:52 -07003448 .xi_value = value,
3449 .xi_value_len = value_len,
Tiger Yang6c3faba2008-11-14 11:16:03 +08003450 };
3451
3452 struct ocfs2_xattr_search xis = {
3453 .not_found = -ENODATA,
3454 };
3455
3456 struct ocfs2_xattr_search xbs = {
3457 .not_found = -ENODATA,
3458 };
3459
3460 struct ocfs2_xattr_set_ctxt ctxt = {
3461 .handle = handle,
3462 .meta_ac = meta_ac,
3463 .data_ac = data_ac,
3464 };
3465
3466 if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
3467 return -EOPNOTSUPP;
3468
Tiger Yang008aafa2008-12-09 16:43:08 +08003469 /*
 3470	 * In an extreme situation we may need an xattr bucket when
 3471	 * the block size is too small. The credits for the bucket
 3472	 * have already been reserved in mknod.
3473 */
3474 if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) {
3475 xbs.bucket = ocfs2_xattr_bucket_new(inode);
3476 if (!xbs.bucket) {
3477 mlog_errno(-ENOMEM);
3478 return -ENOMEM;
3479 }
3480 }
3481
Tiger Yang6c3faba2008-11-14 11:16:03 +08003482 xis.inode_bh = xbs.inode_bh = di_bh;
3483 di = (struct ocfs2_dinode *)di_bh->b_data;
3484
3485 down_write(&OCFS2_I(inode)->ip_xattr_sem);
3486
3487 ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
3488 if (ret)
3489 goto cleanup;
3490 if (xis.not_found) {
3491 ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs);
3492 if (ret)
3493 goto cleanup;
3494 }
3495
3496 ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);
3497
3498cleanup:
3499 up_write(&OCFS2_I(inode)->ip_xattr_sem);
3500 brelse(xbs.xattr_bh);
Tiger Yang008aafa2008-12-09 16:43:08 +08003501 ocfs2_xattr_bucket_free(xbs.bucket);
Tiger Yang6c3faba2008-11-14 11:16:03 +08003502
3503 return ret;
3504}
3505
3506/*
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003507 * ocfs2_xattr_set()
3508 *
3509 * Set, replace or remove an extended attribute for this inode.
3510 * value is NULL to remove an existing extended attribute, else either
3511 * create or replace an extended attribute.
3512 */
3513int ocfs2_xattr_set(struct inode *inode,
3514 int name_index,
3515 const char *name,
3516 const void *value,
3517 size_t value_len,
3518 int flags)
3519{
3520 struct buffer_head *di_bh = NULL;
3521 struct ocfs2_dinode *di;
Tao Ma492a8a32009-08-18 11:43:17 +08003522 int ret, credits, ref_meta = 0, ref_credits = 0;
Tao Ma78f30c32008-11-12 08:27:00 +08003523 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
Tao Ma85db90e2008-11-12 08:27:01 +08003524 struct inode *tl_inode = osb->osb_tl_inode;
Younger Liu6ea437a2013-09-11 14:19:56 -07003525 struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
Tao Ma492a8a32009-08-18 11:43:17 +08003526 struct ocfs2_refcount_tree *ref_tree = NULL;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003527
3528 struct ocfs2_xattr_info xi = {
Joel Becker6b240ff2009-08-14 18:02:52 -07003529 .xi_name_index = name_index,
3530 .xi_name = name,
Joel Becker18853b92009-08-14 18:17:07 -07003531 .xi_name_len = strlen(name),
Joel Becker6b240ff2009-08-14 18:02:52 -07003532 .xi_value = value,
3533 .xi_value_len = value_len,
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003534 };
3535
3536 struct ocfs2_xattr_search xis = {
3537 .not_found = -ENODATA,
3538 };
3539
3540 struct ocfs2_xattr_search xbs = {
3541 .not_found = -ENODATA,
3542 };
3543
Tiger Yang8154da32008-08-18 17:11:46 +08003544 if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
3545 return -EOPNOTSUPP;
3546
Joel Beckerba937122008-10-24 19:13:20 -07003547 /*
3548 * Only xbs will be used on indexed trees. xis doesn't need a
3549 * bucket.
3550 */
3551 xbs.bucket = ocfs2_xattr_bucket_new(inode);
3552 if (!xbs.bucket) {
3553 mlog_errno(-ENOMEM);
3554 return -ENOMEM;
3555 }
3556
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003557 ret = ocfs2_inode_lock(inode, &di_bh, 1);
3558 if (ret < 0) {
3559 mlog_errno(ret);
Joel Beckerba937122008-10-24 19:13:20 -07003560 goto cleanup_nolock;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003561 }
3562 xis.inode_bh = xbs.inode_bh = di_bh;
3563 di = (struct ocfs2_dinode *)di_bh->b_data;
3564
3565 down_write(&OCFS2_I(inode)->ip_xattr_sem);
3566 /*
 3567	 * Scan the inode and the external block for an extended attribute
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003568	 * with the same name and collect search information.
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003569 */
3570 ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
3571 if (ret)
3572 goto cleanup;
3573 if (xis.not_found) {
3574 ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs);
3575 if (ret)
3576 goto cleanup;
3577 }
3578
3579 if (xis.not_found && xbs.not_found) {
3580 ret = -ENODATA;
3581 if (flags & XATTR_REPLACE)
3582 goto cleanup;
3583 ret = 0;
3584 if (!value)
3585 goto cleanup;
3586 } else {
3587 ret = -EEXIST;
3588 if (flags & XATTR_CREATE)
3589 goto cleanup;
3590 }
3591
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003592 /* Check whether the value is refcounted and do some preparation. */
Tao Ma492a8a32009-08-18 11:43:17 +08003593 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL &&
3594 (!xis.not_found || !xbs.not_found)) {
3595 ret = ocfs2_prepare_refcount_xattr(inode, di, &xi,
3596 &xis, &xbs, &ref_tree,
3597 &ref_meta, &ref_credits);
3598 if (ret) {
3599 mlog_errno(ret);
3600 goto cleanup;
3601 }
3602 }
Tao Ma85db90e2008-11-12 08:27:01 +08003603
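	/*
	 * Flush the truncate log if it needs it before reserving space;
	 * presumably this lets clusters that are still pending in the log
	 * be reused by the allocation below.
	 */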
3604 mutex_lock(&tl_inode->i_mutex);
3605
3606 if (ocfs2_truncate_log_needs_flush(osb)) {
3607 ret = __ocfs2_flush_truncate_log(osb);
3608 if (ret < 0) {
3609 mutex_unlock(&tl_inode->i_mutex);
3610 mlog_errno(ret);
3611 goto cleanup;
3612 }
3613 }
3614 mutex_unlock(&tl_inode->i_mutex);
3615
3616 ret = ocfs2_init_xattr_set_ctxt(inode, di, &xi, &xis,
Tao Ma492a8a32009-08-18 11:43:17 +08003617 &xbs, &ctxt, ref_meta, &credits);
Tao Ma78f30c32008-11-12 08:27:00 +08003618 if (ret) {
3619 mlog_errno(ret);
3620 goto cleanup;
3621 }
3622
Tao Ma4b3f6202008-12-05 06:20:55 +08003623	/* We need to update the inode's ctime field, so add credits for it. */
3624 credits += OCFS2_INODE_UPDATE_CREDITS;
Tao Ma492a8a32009-08-18 11:43:17 +08003625 ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
Tao Ma85db90e2008-11-12 08:27:01 +08003626 if (IS_ERR(ctxt.handle)) {
3627 ret = PTR_ERR(ctxt.handle);
3628 mlog_errno(ret);
Younger Liu6ea437a2013-09-11 14:19:56 -07003629 goto out_free_ac;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003630 }
Tao Ma85db90e2008-11-12 08:27:01 +08003631
3632 ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);
Darrick J. Wong6fdb7022014-04-03 14:47:08 -07003633 ocfs2_update_inode_fsync_trans(ctxt.handle, inode, 0);
Tao Ma85db90e2008-11-12 08:27:01 +08003634
3635 ocfs2_commit_trans(osb, ctxt.handle);
3636
Younger Liu6ea437a2013-09-11 14:19:56 -07003637out_free_ac:
Tao Ma78f30c32008-11-12 08:27:00 +08003638 if (ctxt.data_ac)
3639 ocfs2_free_alloc_context(ctxt.data_ac);
3640 if (ctxt.meta_ac)
3641 ocfs2_free_alloc_context(ctxt.meta_ac);
3642 if (ocfs2_dealloc_has_cluster(&ctxt.dealloc))
3643 ocfs2_schedule_truncate_log_flush(osb, 1);
3644 ocfs2_run_deallocs(osb, &ctxt.dealloc);
Tao Ma8b2c0db2009-08-18 11:43:49 +08003645
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003646cleanup:
Tao Ma492a8a32009-08-18 11:43:17 +08003647 if (ref_tree)
3648 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003649 up_write(&OCFS2_I(inode)->ip_xattr_sem);
Tao Ma8b2c0db2009-08-18 11:43:49 +08003650 if (!value && !ret) {
3651 ret = ocfs2_try_remove_refcount_tree(inode, di_bh);
3652 if (ret)
3653 mlog_errno(ret);
3654 }
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003655 ocfs2_inode_unlock(inode, 1);
Joel Beckerba937122008-10-24 19:13:20 -07003656cleanup_nolock:
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003657 brelse(di_bh);
3658 brelse(xbs.xattr_bh);
Joel Beckerba937122008-10-24 19:13:20 -07003659 ocfs2_xattr_bucket_free(xbs.bucket);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003660
3661 return ret;
3662}
3663
Tao Ma0c044f02008-08-18 17:38:50 +08003664/*
 3665 * Find the xattr extent rec which may contain name_hash.
 3666 * e_cpos will be the first name hash of the xattr rec.
 3667 * el must be the ocfs2_xattr_block.xb_attrs.xb_root.xt_list.
3668 */
3669static int ocfs2_xattr_get_rec(struct inode *inode,
3670 u32 name_hash,
3671 u64 *p_blkno,
3672 u32 *e_cpos,
3673 u32 *num_clusters,
3674 struct ocfs2_extent_list *el)
3675{
3676 int ret = 0, i;
3677 struct buffer_head *eb_bh = NULL;
3678 struct ocfs2_extent_block *eb;
3679 struct ocfs2_extent_rec *rec = NULL;
3680 u64 e_blkno = 0;
3681
3682 if (el->l_tree_depth) {
Joel Beckerfacdb772009-02-12 18:08:48 -08003683 ret = ocfs2_find_leaf(INODE_CACHE(inode), el, name_hash,
3684 &eb_bh);
Tao Ma0c044f02008-08-18 17:38:50 +08003685 if (ret) {
3686 mlog_errno(ret);
3687 goto out;
3688 }
3689
3690 eb = (struct ocfs2_extent_block *) eb_bh->b_data;
3691 el = &eb->h_list;
3692
3693 if (el->l_tree_depth) {
Goldwyn Rodrigues17a5b9a2015-09-04 15:44:17 -07003694 ret = ocfs2_error(inode->i_sb,
Tao Ma0c044f02008-08-18 17:38:50 +08003695 "Inode %lu has non zero tree depth in "
3696 "xattr tree block %llu\n", inode->i_ino,
3697 (unsigned long long)eb_bh->b_blocknr);
Tao Ma0c044f02008-08-18 17:38:50 +08003698 goto out;
3699 }
3700 }
3701
3702 for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
3703 rec = &el->l_recs[i];
3704
3705 if (le32_to_cpu(rec->e_cpos) <= name_hash) {
3706 e_blkno = le64_to_cpu(rec->e_blkno);
3707 break;
3708 }
3709 }
3710
3711 if (!e_blkno) {
Goldwyn Rodrigues17a5b9a2015-09-04 15:44:17 -07003712 ret = ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
Tao Ma0c044f02008-08-18 17:38:50 +08003713 "record (%u, %u, 0) in xattr", inode->i_ino,
3714 le32_to_cpu(rec->e_cpos),
3715 ocfs2_rec_clusters(el, rec));
Tao Ma0c044f02008-08-18 17:38:50 +08003716 goto out;
3717 }
3718
3719 *p_blkno = le64_to_cpu(rec->e_blkno);
3720 *num_clusters = le16_to_cpu(rec->e_leaf_clusters);
3721 if (e_cpos)
3722 *e_cpos = le32_to_cpu(rec->e_cpos);
3723out:
3724 brelse(eb_bh);
3725 return ret;
3726}
3727
3728typedef int (xattr_bucket_func)(struct inode *inode,
3729 struct ocfs2_xattr_bucket *bucket,
3730 void *para);
3731
Tao Ma589dc262008-08-18 17:38:51 +08003732static int ocfs2_find_xe_in_bucket(struct inode *inode,
Joel Beckere2356a32008-10-27 15:01:54 -07003733 struct ocfs2_xattr_bucket *bucket,
Tao Ma589dc262008-08-18 17:38:51 +08003734 int name_index,
3735 const char *name,
3736 u32 name_hash,
3737 u16 *xe_index,
3738 int *found)
3739{
3740 int i, ret = 0, cmp = 1, block_off, new_offset;
Joel Beckere2356a32008-10-27 15:01:54 -07003741 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
Tao Ma589dc262008-08-18 17:38:51 +08003742 size_t name_len = strlen(name);
3743 struct ocfs2_xattr_entry *xe = NULL;
Tao Ma589dc262008-08-18 17:38:51 +08003744 char *xe_name;
3745
3746 /*
3747 * We don't use binary search in the bucket because there
3748 * may be multiple entries with the same name hash.
3749 */
3750 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
3751 xe = &xh->xh_entries[i];
3752
3753 if (name_hash > le32_to_cpu(xe->xe_name_hash))
3754 continue;
3755 else if (name_hash < le32_to_cpu(xe->xe_name_hash))
3756 break;
3757
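		/* Same hash: match on type, then name length, then the name bytes. */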
3758 cmp = name_index - ocfs2_xattr_get_type(xe);
3759 if (!cmp)
3760 cmp = name_len - xe->xe_name_len;
3761 if (cmp)
3762 continue;
3763
Tao Mafd68a892009-08-18 11:43:21 +08003764 ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
Tao Ma589dc262008-08-18 17:38:51 +08003765 xh,
3766 i,
3767 &block_off,
3768 &new_offset);
3769 if (ret) {
3770 mlog_errno(ret);
3771 break;
3772 }
3773
Joel Becker970e4932008-11-13 14:49:19 -08003774
Joel Beckere2356a32008-10-27 15:01:54 -07003775 xe_name = bucket_block(bucket, block_off) + new_offset;
3776 if (!memcmp(name, xe_name, name_len)) {
Tao Ma589dc262008-08-18 17:38:51 +08003777 *xe_index = i;
3778 *found = 1;
3779 ret = 0;
3780 break;
3781 }
3782 }
3783
3784 return ret;
3785}
3786
3787/*
3788 * Find the specified xattr entry in a series of buckets.
 3789 * This series starts from p_blkno and lasts for num_clusters.
 3790 * The ocfs2_xattr_header.xh_num_buckets of the first bucket contains
 3791 * the number of valid buckets.
3792 *
3793 * Return the buffer_head this xattr should reside in. And if the xattr's
3794 * hash is in the gap of 2 buckets, return the lower bucket.
3795 */
3796static int ocfs2_xattr_bucket_find(struct inode *inode,
3797 int name_index,
3798 const char *name,
3799 u32 name_hash,
3800 u64 p_blkno,
3801 u32 first_hash,
3802 u32 num_clusters,
3803 struct ocfs2_xattr_search *xs)
3804{
3805 int ret, found = 0;
Tao Ma589dc262008-08-18 17:38:51 +08003806 struct ocfs2_xattr_header *xh = NULL;
3807 struct ocfs2_xattr_entry *xe = NULL;
3808 u16 index = 0;
3809 u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
3810 int low_bucket = 0, bucket, high_bucket;
Joel Beckere2356a32008-10-27 15:01:54 -07003811 struct ocfs2_xattr_bucket *search;
Tao Ma589dc262008-08-18 17:38:51 +08003812 u32 last_hash;
Joel Beckere2356a32008-10-27 15:01:54 -07003813 u64 blkno, lower_blkno = 0;
Tao Ma589dc262008-08-18 17:38:51 +08003814
Joel Beckere2356a32008-10-27 15:01:54 -07003815 search = ocfs2_xattr_bucket_new(inode);
3816 if (!search) {
3817 ret = -ENOMEM;
3818 mlog_errno(ret);
3819 goto out;
3820 }
3821
3822 ret = ocfs2_read_xattr_bucket(search, p_blkno);
Tao Ma589dc262008-08-18 17:38:51 +08003823 if (ret) {
3824 mlog_errno(ret);
3825 goto out;
3826 }
3827
Joel Beckere2356a32008-10-27 15:01:54 -07003828 xh = bucket_xh(search);
Tao Ma589dc262008-08-18 17:38:51 +08003829 high_bucket = le16_to_cpu(xh->xh_num_buckets) - 1;
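	/*
	 * Binary search over the buckets using the first name hash of
	 * each bucket; lower_blkno tracks the last bucket whose first
	 * hash is <= name_hash, i.e. where an insert would land.
	 */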
Tao Ma589dc262008-08-18 17:38:51 +08003830 while (low_bucket <= high_bucket) {
Joel Beckere2356a32008-10-27 15:01:54 -07003831 ocfs2_xattr_bucket_relse(search);
3832
Tao Ma589dc262008-08-18 17:38:51 +08003833 bucket = (low_bucket + high_bucket) / 2;
Tao Ma589dc262008-08-18 17:38:51 +08003834 blkno = p_blkno + bucket * blk_per_bucket;
Joel Beckere2356a32008-10-27 15:01:54 -07003835 ret = ocfs2_read_xattr_bucket(search, blkno);
Tao Ma589dc262008-08-18 17:38:51 +08003836 if (ret) {
3837 mlog_errno(ret);
3838 goto out;
3839 }
3840
Joel Beckere2356a32008-10-27 15:01:54 -07003841 xh = bucket_xh(search);
Tao Ma589dc262008-08-18 17:38:51 +08003842 xe = &xh->xh_entries[0];
3843 if (name_hash < le32_to_cpu(xe->xe_name_hash)) {
3844 high_bucket = bucket - 1;
3845 continue;
3846 }
3847
3848 /*
 3849		 * Check whether the hash of the last entry in our
Tao Ma5a0956112008-09-19 22:17:41 +08003850		 * bucket is larger than the one we search for. For an empty
 3851		 * bucket, the last one is also the first one.
Tao Ma589dc262008-08-18 17:38:51 +08003852 */
Tao Ma5a0956112008-09-19 22:17:41 +08003853 if (xh->xh_count)
3854 xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
3855
Tao Ma589dc262008-08-18 17:38:51 +08003856 last_hash = le32_to_cpu(xe->xe_name_hash);
3857
Joel Beckere2356a32008-10-27 15:01:54 -07003858 /* record lower_blkno which may be the insert place. */
3859 lower_blkno = blkno;
Tao Ma589dc262008-08-18 17:38:51 +08003860
3861 if (name_hash > le32_to_cpu(xe->xe_name_hash)) {
3862 low_bucket = bucket + 1;
3863 continue;
3864 }
3865
3866 /* the searched xattr should reside in this bucket if exists. */
Joel Beckere2356a32008-10-27 15:01:54 -07003867 ret = ocfs2_find_xe_in_bucket(inode, search,
Tao Ma589dc262008-08-18 17:38:51 +08003868 name_index, name, name_hash,
3869 &index, &found);
3870 if (ret) {
3871 mlog_errno(ret);
3872 goto out;
3873 }
3874 break;
3875 }
3876
3877 /*
3878 * Record the bucket we have found.
3879 * When the xattr's hash value is in the gap of 2 buckets, we will
3880 * always set it to the previous bucket.
3881 */
Joel Beckere2356a32008-10-27 15:01:54 -07003882 if (!lower_blkno)
3883 lower_blkno = p_blkno;
3884
3885 /* This should be in cache - we just read it during the search */
3886 ret = ocfs2_read_xattr_bucket(xs->bucket, lower_blkno);
3887 if (ret) {
3888 mlog_errno(ret);
3889 goto out;
Tao Ma589dc262008-08-18 17:38:51 +08003890 }
Tao Ma589dc262008-08-18 17:38:51 +08003891
Joel Beckerba937122008-10-24 19:13:20 -07003892 xs->header = bucket_xh(xs->bucket);
3893 xs->base = bucket_block(xs->bucket, 0);
Tao Ma589dc262008-08-18 17:38:51 +08003894 xs->end = xs->base + inode->i_sb->s_blocksize;
3895
3896 if (found) {
Tao Ma589dc262008-08-18 17:38:51 +08003897 xs->here = &xs->header->xh_entries[index];
Tao Ma402b4182011-02-23 22:01:17 +08003898 trace_ocfs2_xattr_bucket_find(OCFS2_I(inode)->ip_blkno,
3899 name, name_index, name_hash,
3900 (unsigned long long)bucket_blkno(xs->bucket),
3901 index);
Tao Ma589dc262008-08-18 17:38:51 +08003902 } else
3903 ret = -ENODATA;
3904
3905out:
Joel Beckere2356a32008-10-27 15:01:54 -07003906 ocfs2_xattr_bucket_free(search);
Tao Ma589dc262008-08-18 17:38:51 +08003907 return ret;
3908}
3909
3910static int ocfs2_xattr_index_block_find(struct inode *inode,
3911 struct buffer_head *root_bh,
3912 int name_index,
3913 const char *name,
3914 struct ocfs2_xattr_search *xs)
3915{
3916 int ret;
3917 struct ocfs2_xattr_block *xb =
3918 (struct ocfs2_xattr_block *)root_bh->b_data;
3919 struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root;
3920 struct ocfs2_extent_list *el = &xb_root->xt_list;
3921 u64 p_blkno = 0;
3922 u32 first_hash, num_clusters = 0;
Tao Ma2057e5c2008-10-09 23:06:13 +08003923 u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name));
Tao Ma589dc262008-08-18 17:38:51 +08003924
3925 if (le16_to_cpu(el->l_next_free_rec) == 0)
3926 return -ENODATA;
3927
Tao Ma402b4182011-02-23 22:01:17 +08003928 trace_ocfs2_xattr_index_block_find(OCFS2_I(inode)->ip_blkno,
3929 name, name_index, name_hash,
3930 (unsigned long long)root_bh->b_blocknr,
3931 -1);
Tao Ma589dc262008-08-18 17:38:51 +08003932
3933 ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &first_hash,
3934 &num_clusters, el);
3935 if (ret) {
3936 mlog_errno(ret);
3937 goto out;
3938 }
3939
3940 BUG_ON(p_blkno == 0 || num_clusters == 0 || first_hash > name_hash);
3941
Tao Ma402b4182011-02-23 22:01:17 +08003942 trace_ocfs2_xattr_index_block_find_rec(OCFS2_I(inode)->ip_blkno,
3943 name, name_index, first_hash,
3944 (unsigned long long)p_blkno,
3945 num_clusters);
Tao Ma589dc262008-08-18 17:38:51 +08003946
3947 ret = ocfs2_xattr_bucket_find(inode, name_index, name, name_hash,
3948 p_blkno, first_hash, num_clusters, xs);
3949
3950out:
3951 return ret;
3952}
3953
Tao Ma0c044f02008-08-18 17:38:50 +08003954static int ocfs2_iterate_xattr_buckets(struct inode *inode,
3955 u64 blkno,
3956 u32 clusters,
3957 xattr_bucket_func *func,
3958 void *para)
3959{
Joel Becker6dde41d2008-10-24 17:16:48 -07003960 int i, ret = 0;
Tao Ma0c044f02008-08-18 17:38:50 +08003961 u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb));
3962 u32 num_buckets = clusters * bpc;
Joel Beckerba937122008-10-24 19:13:20 -07003963 struct ocfs2_xattr_bucket *bucket;
Tao Ma0c044f02008-08-18 17:38:50 +08003964
Joel Beckerba937122008-10-24 19:13:20 -07003965 bucket = ocfs2_xattr_bucket_new(inode);
3966 if (!bucket) {
3967 mlog_errno(-ENOMEM);
3968 return -ENOMEM;
3969 }
Tao Ma0c044f02008-08-18 17:38:50 +08003970
Tao Ma402b4182011-02-23 22:01:17 +08003971 trace_ocfs2_iterate_xattr_buckets(
3972 (unsigned long long)OCFS2_I(inode)->ip_blkno,
3973 (unsigned long long)blkno, clusters);
Tao Ma0c044f02008-08-18 17:38:50 +08003974
Joel Beckerba937122008-10-24 19:13:20 -07003975 for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) {
3976 ret = ocfs2_read_xattr_bucket(bucket, blkno);
Tao Ma0c044f02008-08-18 17:38:50 +08003977 if (ret) {
3978 mlog_errno(ret);
Joel Beckerba937122008-10-24 19:13:20 -07003979 break;
Tao Ma0c044f02008-08-18 17:38:50 +08003980 }
3981
Tao Ma0c044f02008-08-18 17:38:50 +08003982 /*
 3983		 * The real bucket count in this series of blocks is stored
 3984		 * in the 1st bucket.
3985 */
3986 if (i == 0)
Joel Beckerba937122008-10-24 19:13:20 -07003987 num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets);
Tao Ma0c044f02008-08-18 17:38:50 +08003988
Tao Ma402b4182011-02-23 22:01:17 +08003989 trace_ocfs2_iterate_xattr_bucket((unsigned long long)blkno,
Joel Beckerba937122008-10-24 19:13:20 -07003990 le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash));
Tao Ma0c044f02008-08-18 17:38:50 +08003991 if (func) {
Joel Beckerba937122008-10-24 19:13:20 -07003992 ret = func(inode, bucket, para);
Tao Maa46fa682009-05-04 05:18:09 +08003993 if (ret && ret != -ERANGE)
Tao Ma0c044f02008-08-18 17:38:50 +08003994 mlog_errno(ret);
Joel Beckerba937122008-10-24 19:13:20 -07003995 /* Fall through to bucket_relse() */
Tao Ma0c044f02008-08-18 17:38:50 +08003996 }
3997
Joel Beckerba937122008-10-24 19:13:20 -07003998 ocfs2_xattr_bucket_relse(bucket);
3999 if (ret)
4000 break;
Tao Ma0c044f02008-08-18 17:38:50 +08004001 }
4002
Joel Beckerba937122008-10-24 19:13:20 -07004003 ocfs2_xattr_bucket_free(bucket);
Tao Ma0c044f02008-08-18 17:38:50 +08004004 return ret;
4005}
4006
4007struct ocfs2_xattr_tree_list {
4008 char *buffer;
4009 size_t buffer_size;
Tao Ma936b8832008-10-09 23:06:14 +08004010 size_t result;
Tao Ma0c044f02008-08-18 17:38:50 +08004011};
4012
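/*
 * Translate an entry's bucket-relative name offset into the index of the
 * block inside the bucket and the offset within that block.
 */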
Tao Mafd68a892009-08-18 11:43:21 +08004013static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
Tao Ma0c044f02008-08-18 17:38:50 +08004014 struct ocfs2_xattr_header *xh,
4015 int index,
4016 int *block_off,
4017 int *new_offset)
4018{
4019 u16 name_offset;
4020
4021 if (index < 0 || index >= le16_to_cpu(xh->xh_count))
4022 return -EINVAL;
4023
4024 name_offset = le16_to_cpu(xh->xh_entries[index].xe_name_offset);
4025
Tao Mafd68a892009-08-18 11:43:21 +08004026 *block_off = name_offset >> sb->s_blocksize_bits;
4027 *new_offset = name_offset % sb->s_blocksize;
Tao Ma0c044f02008-08-18 17:38:50 +08004028
4029 return 0;
4030}
4031
4032static int ocfs2_list_xattr_bucket(struct inode *inode,
4033 struct ocfs2_xattr_bucket *bucket,
4034 void *para)
4035{
Tao Ma936b8832008-10-09 23:06:14 +08004036 int ret = 0, type;
Tao Ma0c044f02008-08-18 17:38:50 +08004037 struct ocfs2_xattr_tree_list *xl = (struct ocfs2_xattr_tree_list *)para;
Tao Ma0c044f02008-08-18 17:38:50 +08004038 int i, block_off, new_offset;
Tao Ma936b8832008-10-09 23:06:14 +08004039 const char *prefix, *name;
Tao Ma0c044f02008-08-18 17:38:50 +08004040
Joel Becker3e632942008-10-24 17:04:49 -07004041 for (i = 0 ; i < le16_to_cpu(bucket_xh(bucket)->xh_count); i++) {
4042 struct ocfs2_xattr_entry *entry = &bucket_xh(bucket)->xh_entries[i];
Tao Ma936b8832008-10-09 23:06:14 +08004043 type = ocfs2_xattr_get_type(entry);
4044 prefix = ocfs2_xattr_prefix(type);
Tao Ma0c044f02008-08-18 17:38:50 +08004045
Tao Ma936b8832008-10-09 23:06:14 +08004046 if (prefix) {
Tao Mafd68a892009-08-18 11:43:21 +08004047 ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
Joel Becker3e632942008-10-24 17:04:49 -07004048 bucket_xh(bucket),
Tao Ma0c044f02008-08-18 17:38:50 +08004049 i,
4050 &block_off,
4051 &new_offset);
4052 if (ret)
4053 break;
Tao Ma936b8832008-10-09 23:06:14 +08004054
Joel Becker51def392008-10-24 16:57:21 -07004055 name = (const char *)bucket_block(bucket, block_off) +
Tao Ma936b8832008-10-09 23:06:14 +08004056 new_offset;
4057 ret = ocfs2_xattr_list_entry(xl->buffer,
4058 xl->buffer_size,
4059 &xl->result,
4060 prefix, name,
4061 entry->xe_name_len);
4062 if (ret)
4063 break;
Tao Ma0c044f02008-08-18 17:38:50 +08004064 }
4065 }
4066
4067 return ret;
4068}
4069
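/*
 * Walk every extent record of the xattr index tree from the highest
 * name hash downwards, calling rec_func() on each (blkno, cpos, clusters)
 * range until the record starting at hash 0 has been visited.
 */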
Tao Ma47bca492009-08-18 11:43:42 +08004070static int ocfs2_iterate_xattr_index_block(struct inode *inode,
4071 struct buffer_head *blk_bh,
4072 xattr_tree_rec_func *rec_func,
4073 void *para)
Tao Ma0c044f02008-08-18 17:38:50 +08004074{
Tao Ma47bca492009-08-18 11:43:42 +08004075 struct ocfs2_xattr_block *xb =
4076 (struct ocfs2_xattr_block *)blk_bh->b_data;
4077 struct ocfs2_extent_list *el = &xb->xb_attrs.xb_root.xt_list;
Tao Ma0c044f02008-08-18 17:38:50 +08004078 int ret = 0;
4079 u32 name_hash = UINT_MAX, e_cpos = 0, num_clusters = 0;
4080 u64 p_blkno = 0;
Tao Ma0c044f02008-08-18 17:38:50 +08004081
Tao Ma47bca492009-08-18 11:43:42 +08004082 if (!el->l_next_free_rec || !rec_func)
Tao Ma0c044f02008-08-18 17:38:50 +08004083 return 0;
4084
4085 while (name_hash > 0) {
4086 ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno,
4087 &e_cpos, &num_clusters, el);
4088 if (ret) {
4089 mlog_errno(ret);
Tao Ma47bca492009-08-18 11:43:42 +08004090 break;
Tao Ma0c044f02008-08-18 17:38:50 +08004091 }
4092
Tao Ma47bca492009-08-18 11:43:42 +08004093 ret = rec_func(inode, blk_bh, p_blkno, e_cpos,
4094 num_clusters, para);
Tao Ma0c044f02008-08-18 17:38:50 +08004095 if (ret) {
Tao Maa46fa682009-05-04 05:18:09 +08004096 if (ret != -ERANGE)
4097 mlog_errno(ret);
Tao Ma47bca492009-08-18 11:43:42 +08004098 break;
Tao Ma0c044f02008-08-18 17:38:50 +08004099 }
4100
4101 if (e_cpos == 0)
4102 break;
4103
4104 name_hash = e_cpos - 1;
4105 }
4106
Tao Ma47bca492009-08-18 11:43:42 +08004107 return ret;
4108
4109}
4110
4111static int ocfs2_list_xattr_tree_rec(struct inode *inode,
4112 struct buffer_head *root_bh,
4113 u64 blkno, u32 cpos, u32 len, void *para)
4114{
4115 return ocfs2_iterate_xattr_buckets(inode, blkno, len,
4116 ocfs2_list_xattr_bucket, para);
4117}
4118
4119static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
4120 struct buffer_head *blk_bh,
4121 char *buffer,
4122 size_t buffer_size)
4123{
4124 int ret;
4125 struct ocfs2_xattr_tree_list xl = {
4126 .buffer = buffer,
4127 .buffer_size = buffer_size,
4128 .result = 0,
4129 };
4130
4131 ret = ocfs2_iterate_xattr_index_block(inode, blk_bh,
4132 ocfs2_list_xattr_tree_rec, &xl);
4133 if (ret) {
4134 mlog_errno(ret);
4135 goto out;
4136 }
4137
Tao Ma936b8832008-10-09 23:06:14 +08004138 ret = xl.result;
Tao Ma0c044f02008-08-18 17:38:50 +08004139out:
4140 return ret;
4141}
Tao Ma01225592008-08-18 17:38:53 +08004142
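/*
 * sort() helpers: cmp_xe/swap_xe order xattr entries by ascending name
 * hash; cmp_xe_offset further below orders them by descending
 * name/value offset instead.
 */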
4143static int cmp_xe(const void *a, const void *b)
4144{
4145 const struct ocfs2_xattr_entry *l = a, *r = b;
4146 u32 l_hash = le32_to_cpu(l->xe_name_hash);
4147 u32 r_hash = le32_to_cpu(r->xe_name_hash);
4148
4149 if (l_hash > r_hash)
4150 return 1;
4151 if (l_hash < r_hash)
4152 return -1;
4153 return 0;
4154}
4155
4156static void swap_xe(void *a, void *b, int size)
4157{
4158 struct ocfs2_xattr_entry *l = a, *r = b, tmp;
4159
4160 tmp = *l;
4161 memcpy(l, r, sizeof(struct ocfs2_xattr_entry));
4162 memcpy(r, &tmp, sizeof(struct ocfs2_xattr_entry));
4163}
4164
4165/*
 4166 * When the ocfs2_xattr_block is filled up, a new bucket will be created
4167 * and all the xattr entries will be moved to the new bucket.
Joel Becker178eeac2008-10-27 15:18:29 -07004168 * The header goes at the start of the bucket, and the names+values are
4169 * filled from the end. This is why *target starts as the last buffer.
Tao Ma01225592008-08-18 17:38:53 +08004170 * Note: we need to sort the entries since they are not saved in order
4171 * in the ocfs2_xattr_block.
4172 */
4173static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode,
4174 struct buffer_head *xb_bh,
Joel Becker178eeac2008-10-27 15:18:29 -07004175 struct ocfs2_xattr_bucket *bucket)
Tao Ma01225592008-08-18 17:38:53 +08004176{
4177 int i, blocksize = inode->i_sb->s_blocksize;
Joel Becker178eeac2008-10-27 15:18:29 -07004178 int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Tao Ma01225592008-08-18 17:38:53 +08004179 u16 offset, size, off_change;
4180 struct ocfs2_xattr_entry *xe;
4181 struct ocfs2_xattr_block *xb =
4182 (struct ocfs2_xattr_block *)xb_bh->b_data;
4183 struct ocfs2_xattr_header *xb_xh = &xb->xb_attrs.xb_header;
Joel Becker178eeac2008-10-27 15:18:29 -07004184 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
Tao Ma01225592008-08-18 17:38:53 +08004185 u16 count = le16_to_cpu(xb_xh->xh_count);
Joel Becker178eeac2008-10-27 15:18:29 -07004186 char *src = xb_bh->b_data;
4187 char *target = bucket_block(bucket, blks - 1);
Tao Ma01225592008-08-18 17:38:53 +08004188
Tao Ma402b4182011-02-23 22:01:17 +08004189 trace_ocfs2_cp_xattr_block_to_bucket_begin(
4190 (unsigned long long)xb_bh->b_blocknr,
4191 (unsigned long long)bucket_blkno(bucket));
Tao Ma01225592008-08-18 17:38:53 +08004192
Joel Becker178eeac2008-10-27 15:18:29 -07004193 for (i = 0; i < blks; i++)
4194 memset(bucket_block(bucket, i), 0, blocksize);
4195
Tao Ma01225592008-08-18 17:38:53 +08004196 /*
4197 * Since the xe_name_offset is based on ocfs2_xattr_header,
 4198	 * there is an offset change corresponding to the change of
4199 * ocfs2_xattr_header's position.
4200 */
4201 off_change = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
4202 xe = &xb_xh->xh_entries[count - 1];
4203 offset = le16_to_cpu(xe->xe_name_offset) + off_change;
4204 size = blocksize - offset;
4205
4206 /* copy all the names and values. */
Tao Ma01225592008-08-18 17:38:53 +08004207 memcpy(target + offset, src + offset, size);
4208
4209 /* Init new header now. */
4210 xh->xh_count = xb_xh->xh_count;
4211 xh->xh_num_buckets = cpu_to_le16(1);
4212 xh->xh_name_value_len = cpu_to_le16(size);
4213 xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE - size);
4214
4215 /* copy all the entries. */
Joel Becker178eeac2008-10-27 15:18:29 -07004216 target = bucket_block(bucket, 0);
Tao Ma01225592008-08-18 17:38:53 +08004217 offset = offsetof(struct ocfs2_xattr_header, xh_entries);
4218 size = count * sizeof(struct ocfs2_xattr_entry);
4219 memcpy(target + offset, (char *)xb_xh + offset, size);
4220
4221 /* Change the xe offset for all the xe because of the move. */
4222 off_change = OCFS2_XATTR_BUCKET_SIZE - blocksize +
4223 offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
4224 for (i = 0; i < count; i++)
4225 le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change);
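	/*
	 * Illustration (1KB blocks assumed purely as an example): with the
	 * 4KB bucket, name/value data that sat near the end of the old 1KB
	 * block now sits near the end of the bucket, so every
	 * xe_name_offset grows by (4096 - 1024) plus the offset of the
	 * xattr header inside the old block, turning header-relative
	 * offsets into bucket-relative ones.
	 */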
4226
Tao Ma402b4182011-02-23 22:01:17 +08004227 trace_ocfs2_cp_xattr_block_to_bucket_end(offset, size, off_change);
Tao Ma01225592008-08-18 17:38:53 +08004228
4229 sort(target + offset, count, sizeof(struct ocfs2_xattr_entry),
4230 cmp_xe, swap_xe);
4231}
4232
4233/*
 4234 * After we move an xattr from the block to the index b-tree, we have to
 4235 * update ocfs2_xattr_search to the new xe and base.
 4236 *
 4237 * When the entry is in the xattr block, xattr_bh indicates the storage place.
 4238 * If the entry is in the index b-tree, "bucket" indicates the
 4239 * real place of the xattr.
4240 */
Joel Becker178eeac2008-10-27 15:18:29 -07004241static void ocfs2_xattr_update_xattr_search(struct inode *inode,
4242 struct ocfs2_xattr_search *xs,
4243 struct buffer_head *old_bh)
Tao Ma01225592008-08-18 17:38:53 +08004244{
Tao Ma01225592008-08-18 17:38:53 +08004245 char *buf = old_bh->b_data;
4246 struct ocfs2_xattr_block *old_xb = (struct ocfs2_xattr_block *)buf;
4247 struct ocfs2_xattr_header *old_xh = &old_xb->xb_attrs.xb_header;
Joel Becker178eeac2008-10-27 15:18:29 -07004248 int i;
Tao Ma01225592008-08-18 17:38:53 +08004249
Joel Beckerba937122008-10-24 19:13:20 -07004250 xs->header = bucket_xh(xs->bucket);
Joel Becker178eeac2008-10-27 15:18:29 -07004251 xs->base = bucket_block(xs->bucket, 0);
Tao Ma01225592008-08-18 17:38:53 +08004252 xs->end = xs->base + inode->i_sb->s_blocksize;
4253
Joel Becker178eeac2008-10-27 15:18:29 -07004254 if (xs->not_found)
4255 return;
Tao Ma01225592008-08-18 17:38:53 +08004256
Joel Becker178eeac2008-10-27 15:18:29 -07004257 i = xs->here - old_xh->xh_entries;
4258 xs->here = &xs->header->xh_entries[i];
Tao Ma01225592008-08-18 17:38:53 +08004259}
4260
4261static int ocfs2_xattr_create_index_block(struct inode *inode,
Tao Ma78f30c32008-11-12 08:27:00 +08004262 struct ocfs2_xattr_search *xs,
4263 struct ocfs2_xattr_set_ctxt *ctxt)
Tao Ma01225592008-08-18 17:38:53 +08004264{
Tao Ma85db90e2008-11-12 08:27:01 +08004265 int ret;
Tao Ma01225592008-08-18 17:38:53 +08004266 u32 bit_off, len;
4267 u64 blkno;
Tao Ma85db90e2008-11-12 08:27:01 +08004268 handle_t *handle = ctxt->handle;
Tao Ma01225592008-08-18 17:38:53 +08004269 struct ocfs2_inode_info *oi = OCFS2_I(inode);
Tao Ma01225592008-08-18 17:38:53 +08004270 struct buffer_head *xb_bh = xs->xattr_bh;
4271 struct ocfs2_xattr_block *xb =
4272 (struct ocfs2_xattr_block *)xb_bh->b_data;
4273 struct ocfs2_xattr_tree_root *xr;
4274 u16 xb_flags = le16_to_cpu(xb->xb_flags);
Tao Ma01225592008-08-18 17:38:53 +08004275
Tao Ma402b4182011-02-23 22:01:17 +08004276 trace_ocfs2_xattr_create_index_block_begin(
4277 (unsigned long long)xb_bh->b_blocknr);
Tao Ma01225592008-08-18 17:38:53 +08004278
4279 BUG_ON(xb_flags & OCFS2_XATTR_INDEXED);
Joel Becker178eeac2008-10-27 15:18:29 -07004280 BUG_ON(!xs->bucket);
Tao Ma01225592008-08-18 17:38:53 +08004281
Tao Ma01225592008-08-18 17:38:53 +08004282 /*
4283 * XXX:
4284 * We can use this lock for now, and maybe move to a dedicated mutex
4285 * if performance becomes a problem later.
4286 */
4287 down_write(&oi->ip_alloc_sem);
4288
Joel Becker0cf2f762009-02-12 16:41:25 -08004289 ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), xb_bh,
Joel Becker84008972008-12-09 16:11:49 -08004290 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma01225592008-08-18 17:38:53 +08004291 if (ret) {
4292 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +08004293 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004294 }
4295
Joel Becker1ed9b772010-05-06 13:59:06 +08004296 ret = __ocfs2_claim_clusters(handle, ctxt->data_ac,
Tao Ma78f30c32008-11-12 08:27:00 +08004297 1, 1, &bit_off, &len);
Tao Ma01225592008-08-18 17:38:53 +08004298 if (ret) {
4299 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +08004300 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004301 }
4302
4303 /*
 4304	 * The bucket may span many blocks, and
 4305	 * we will only touch the 1st block and the last block
 4306	 * in the whole bucket (one for the entries and one for the data).
4307 */
4308 blkno = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
4309
Tao Ma402b4182011-02-23 22:01:17 +08004310 trace_ocfs2_xattr_create_index_block((unsigned long long)blkno);
Tao Ma01225592008-08-18 17:38:53 +08004311
Wengang Wang9c339252014-04-03 14:47:15 -07004312 ret = ocfs2_init_xattr_bucket(xs->bucket, blkno, 1);
Tao Ma01225592008-08-18 17:38:53 +08004313 if (ret) {
4314 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +08004315 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004316 }
4317
Joel Becker178eeac2008-10-27 15:18:29 -07004318 ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
4319 OCFS2_JOURNAL_ACCESS_CREATE);
Joel Beckerbd60bd32008-10-20 18:25:56 -07004320 if (ret) {
4321 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +08004322 goto out;
Joel Beckerbd60bd32008-10-20 18:25:56 -07004323 }
Tao Ma01225592008-08-18 17:38:53 +08004324
Joel Becker178eeac2008-10-27 15:18:29 -07004325 ocfs2_cp_xattr_block_to_bucket(inode, xb_bh, xs->bucket);
4326 ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
4327
4328 ocfs2_xattr_update_xattr_search(inode, xs, xb_bh);
4329
Tao Ma01225592008-08-18 17:38:53 +08004330 /* Change from ocfs2_xattr_header to ocfs2_xattr_tree_root */
4331 memset(&xb->xb_attrs, 0, inode->i_sb->s_blocksize -
4332 offsetof(struct ocfs2_xattr_block, xb_attrs));
4333
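	/*
	 * The new tree root starts with a single extent record: one
	 * cluster at blkno, which now holds the bucket that received all
	 * of this block's entries.
	 */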
4334 xr = &xb->xb_attrs.xb_root;
4335 xr->xt_clusters = cpu_to_le32(1);
4336 xr->xt_last_eb_blk = 0;
4337 xr->xt_list.l_tree_depth = 0;
4338 xr->xt_list.l_count = cpu_to_le16(ocfs2_xattr_recs_per_xb(inode->i_sb));
4339 xr->xt_list.l_next_free_rec = cpu_to_le16(1);
4340
4341 xr->xt_list.l_recs[0].e_cpos = 0;
4342 xr->xt_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
4343 xr->xt_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
4344
4345 xb->xb_flags = cpu_to_le16(xb_flags | OCFS2_XATTR_INDEXED);
4346
Tao Ma85db90e2008-11-12 08:27:01 +08004347 ocfs2_journal_dirty(handle, xb_bh);
Tao Ma01225592008-08-18 17:38:53 +08004348
Tao Ma85db90e2008-11-12 08:27:01 +08004349out:
Tao Ma01225592008-08-18 17:38:53 +08004350 up_write(&oi->ip_alloc_sem);
4351
Tao Ma01225592008-08-18 17:38:53 +08004352 return ret;
4353}
4354
4355static int cmp_xe_offset(const void *a, const void *b)
4356{
4357 const struct ocfs2_xattr_entry *l = a, *r = b;
4358 u32 l_name_offset = le16_to_cpu(l->xe_name_offset);
4359 u32 r_name_offset = le16_to_cpu(r->xe_name_offset);
4360
4361 if (l_name_offset < r_name_offset)
4362 return 1;
4363 if (l_name_offset > r_name_offset)
4364 return -1;
4365 return 0;
4366}
4367
4368/*
 4369 * Defragment an xattr bucket if we find that the bucket has some
 4370 * holes between name/value pairs.
 4371 * We will move all the name/value pairs to the end of the bucket
 4372 * so that we can free up space for insertion.
4373 */
4374static int ocfs2_defrag_xattr_bucket(struct inode *inode,
Tao Ma85db90e2008-11-12 08:27:01 +08004375 handle_t *handle,
Tao Ma01225592008-08-18 17:38:53 +08004376 struct ocfs2_xattr_bucket *bucket)
4377{
4378 int ret, i;
Joel Becker199799a2009-08-14 19:04:15 -07004379 size_t end, offset, len;
Tao Ma01225592008-08-18 17:38:53 +08004380 struct ocfs2_xattr_header *xh;
4381 char *entries, *buf, *bucket_buf = NULL;
Joel Becker9c7759a2008-10-24 16:21:03 -07004382 u64 blkno = bucket_blkno(bucket);
Tao Ma01225592008-08-18 17:38:53 +08004383 u16 xh_free_start;
Tao Ma01225592008-08-18 17:38:53 +08004384 size_t blocksize = inode->i_sb->s_blocksize;
Tao Ma01225592008-08-18 17:38:53 +08004385 struct ocfs2_xattr_entry *xe;
Tao Ma01225592008-08-18 17:38:53 +08004386
4387 /*
4388 * In order to make the operation more efficient and generic,
4389 * we copy all the blocks into one contiguous memory buffer and do the
4390 * defragmentation there, so if anything goes wrong, we will not touch
4391 * the real blocks.
4392 */
4393 bucket_buf = kmalloc(OCFS2_XATTR_BUCKET_SIZE, GFP_NOFS);
4394 if (!bucket_buf) {
4395		ret = -ENOMEM;
4396 goto out;
4397 }
4398
Joel Becker161d6f32008-10-27 15:25:18 -07004399 buf = bucket_buf;
Tao Ma1c32a2f2008-11-06 08:10:47 +08004400 for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
4401 memcpy(buf, bucket_block(bucket, i), blocksize);
Joel Becker161d6f32008-10-27 15:25:18 -07004402
Tao Ma1c32a2f2008-11-06 08:10:47 +08004403 ret = ocfs2_xattr_bucket_journal_access(handle, bucket,
Joel Becker161d6f32008-10-27 15:25:18 -07004404 OCFS2_JOURNAL_ACCESS_WRITE);
4405 if (ret < 0) {
4406 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +08004407 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004408 }
4409
4410 xh = (struct ocfs2_xattr_header *)bucket_buf;
4411 entries = (char *)xh->xh_entries;
4412 xh_free_start = le16_to_cpu(xh->xh_free_start);
4413
Tao Ma402b4182011-02-23 22:01:17 +08004414 trace_ocfs2_defrag_xattr_bucket(
Mark Fashehde29c082008-10-29 14:45:30 -07004415 (unsigned long long)blkno, le16_to_cpu(xh->xh_count),
4416 xh_free_start, le16_to_cpu(xh->xh_name_value_len));
Tao Ma01225592008-08-18 17:38:53 +08004417
4418 /*
4419 * Sort all the entries by their offset.
4420 * The entry with the largest offset comes first, so that we can
4421 * move the pairs to the end of the bucket one by one.
4422 */
4423 sort(entries, le16_to_cpu(xh->xh_count),
4424 sizeof(struct ocfs2_xattr_entry),
4425 cmp_xe_offset, swap_xe);
4426
4427 /* Move all name/values to the end of the bucket. */
4428 xe = xh->xh_entries;
4429 end = OCFS2_XATTR_BUCKET_SIZE;
4430 for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) {
4431 offset = le16_to_cpu(xe->xe_name_offset);
Joel Becker199799a2009-08-14 19:04:15 -07004432 len = namevalue_size_xe(xe);
Tao Ma01225592008-08-18 17:38:53 +08004433
4434 /*
4435 * We must make sure that each name/value pair
4436 * lives entirely within one block. So adjust end to
4437 * the previous block boundary if needed.
4438 */
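		/*
		 * For example, assuming a 4KB block size: if end == 4296 and
		 * len == 300, the pair would straddle the 4096 boundary, so
		 * end is first pulled back to 4096 and the pair is copied to
		 * [3796, 4096) instead.
		 */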
4439 if (((end - len) / blocksize !=
4440 (end - 1) / blocksize))
4441 end = end - end % blocksize;
4442
4443 if (end > offset + len) {
4444 memmove(bucket_buf + end - len,
4445 bucket_buf + offset, len);
4446 xe->xe_name_offset = cpu_to_le16(end - len);
4447 }
4448
4449 mlog_bug_on_msg(end < offset + len, "Defrag check failed for "
4450 "bucket %llu\n", (unsigned long long)blkno);
4451
4452 end -= len;
4453 }
4454
4455 mlog_bug_on_msg(xh_free_start > end, "Defrag check failed for "
4456 "bucket %llu\n", (unsigned long long)blkno);
4457
4458 if (xh_free_start == end)
Tao Ma85db90e2008-11-12 08:27:01 +08004459 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004460
4461 memset(bucket_buf + xh_free_start, 0, end - xh_free_start);
4462 xh->xh_free_start = cpu_to_le16(end);
4463
4464 /* sort the entries by their name_hash. */
4465 sort(entries, le16_to_cpu(xh->xh_count),
4466 sizeof(struct ocfs2_xattr_entry),
4467 cmp_xe, swap_xe);
4468
4469 buf = bucket_buf;
Tao Ma1c32a2f2008-11-06 08:10:47 +08004470 for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
4471 memcpy(bucket_block(bucket, i), buf, blocksize);
4472 ocfs2_xattr_bucket_journal_dirty(handle, bucket);
Tao Ma01225592008-08-18 17:38:53 +08004473
Tao Ma01225592008-08-18 17:38:53 +08004474out:
Tao Ma01225592008-08-18 17:38:53 +08004475 kfree(bucket_buf);
4476 return ret;
4477}
4478
4479/*
Joel Beckerb5c03e72008-11-25 19:58:16 -08004480 * prev_blkno points to the start of an existing extent. new_blkno
4481 * points to a newly allocated extent. Because we know each of our
4482 * clusters contains more than one bucket, we can easily split one cluster
4483 * at a bucket boundary. So we take the last cluster of the existing
4484 * extent and split it down the middle. We move the last half of the
4485 * buckets in the last cluster of the existing extent over to the new
4486 * extent.
Tao Ma01225592008-08-18 17:38:53 +08004487 *
Joel Beckerb5c03e72008-11-25 19:58:16 -08004488 * first_bh is the buffer at prev_blkno so we can update the existing
4489 * extent's bucket count. header_bh is the bucket where we were hoping
4490 * to insert our xattr. If the bucket move places the target in the new
4491 * extent, we'll update first_bh and header_bh after modifying the old
4492 * extent.
4493 *
4494 * first_hash will be set as the 1st xe's name_hash in the new extent.
Tao Ma01225592008-08-18 17:38:53 +08004495 */
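/*
 * For example, if a cluster holds 16 buckets, to_move is 8, so the upper
 * 8 buckets of the last cluster in the existing extent are copied over to
 * new_blkno (the count of 16 here is only illustrative).
 */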
4496static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode,
4497 handle_t *handle,
Joel Becker41cb8142008-11-26 14:25:21 -08004498 struct ocfs2_xattr_bucket *first,
4499 struct ocfs2_xattr_bucket *target,
Tao Ma01225592008-08-18 17:38:53 +08004500 u64 new_blkno,
Tao Ma01225592008-08-18 17:38:53 +08004501 u32 num_clusters,
4502 u32 *first_hash)
4503{
Joel Beckerc58b6032008-11-26 13:36:24 -08004504 int ret;
Joel Becker41cb8142008-11-26 14:25:21 -08004505 struct super_block *sb = inode->i_sb;
4506 int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(sb);
4507 int num_buckets = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb));
Joel Beckerb5c03e72008-11-25 19:58:16 -08004508 int to_move = num_buckets / 2;
Joel Beckerc58b6032008-11-26 13:36:24 -08004509 u64 src_blkno;
Joel Becker41cb8142008-11-26 14:25:21 -08004510 u64 last_cluster_blkno = bucket_blkno(first) +
4511 ((num_clusters - 1) * ocfs2_clusters_to_blocks(sb, 1));
Tao Ma01225592008-08-18 17:38:53 +08004512
Joel Becker41cb8142008-11-26 14:25:21 -08004513 BUG_ON(le16_to_cpu(bucket_xh(first)->xh_num_buckets) < num_buckets);
4514 BUG_ON(OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(sb)->s_clustersize);
Tao Ma01225592008-08-18 17:38:53 +08004515
Tao Ma402b4182011-02-23 22:01:17 +08004516 trace_ocfs2_mv_xattr_bucket_cross_cluster(
4517 (unsigned long long)last_cluster_blkno,
4518 (unsigned long long)new_blkno);
Tao Ma01225592008-08-18 17:38:53 +08004519
Joel Becker41cb8142008-11-26 14:25:21 -08004520 ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first),
Joel Beckerc58b6032008-11-26 13:36:24 -08004521 last_cluster_blkno, new_blkno,
4522 to_move, first_hash);
Joel Beckerb5c03e72008-11-25 19:58:16 -08004523 if (ret) {
4524 mlog_errno(ret);
4525 goto out;
4526 }
4527
Joel Beckerc58b6032008-11-26 13:36:24 -08004528 /* This is the first bucket that got moved */
4529 src_blkno = last_cluster_blkno + (to_move * blks_per_bucket);
4530
Tao Ma01225592008-08-18 17:38:53 +08004531 /*
Joel Beckerc58b6032008-11-26 13:36:24 -08004532 * If the target bucket was part of the moved buckets, we need to
Joel Becker41cb8142008-11-26 14:25:21 -08004533 * update first and target.
Joel Beckerb5c03e72008-11-25 19:58:16 -08004534 */
Joel Becker41cb8142008-11-26 14:25:21 -08004535 if (bucket_blkno(target) >= src_blkno) {
Joel Beckerb5c03e72008-11-25 19:58:16 -08004536 /* Find the block for the new target bucket */
4537 src_blkno = new_blkno +
Joel Becker41cb8142008-11-26 14:25:21 -08004538 (bucket_blkno(target) - src_blkno);
4539
4540 ocfs2_xattr_bucket_relse(first);
4541 ocfs2_xattr_bucket_relse(target);
Joel Beckerb5c03e72008-11-25 19:58:16 -08004542
4543 /*
Joel Beckerc58b6032008-11-26 13:36:24 -08004544 * These shouldn't fail - the buffers are in the
Joel Beckerb5c03e72008-11-25 19:58:16 -08004545 * journal from ocfs2_cp_xattr_bucket().
4546 */
Joel Becker41cb8142008-11-26 14:25:21 -08004547 ret = ocfs2_read_xattr_bucket(first, new_blkno);
Joel Beckerc58b6032008-11-26 13:36:24 -08004548 if (ret) {
4549 mlog_errno(ret);
4550 goto out;
4551 }
Joel Becker41cb8142008-11-26 14:25:21 -08004552 ret = ocfs2_read_xattr_bucket(target, src_blkno);
4553 if (ret)
Joel Beckerb5c03e72008-11-25 19:58:16 -08004554 mlog_errno(ret);
Joel Beckerb5c03e72008-11-25 19:58:16 -08004555
Joel Beckerb5c03e72008-11-25 19:58:16 -08004556 }
4557
Tao Ma01225592008-08-18 17:38:53 +08004558out:
Tao Ma01225592008-08-18 17:38:53 +08004559 return ret;
4560}
4561
Tao Ma01225592008-08-18 17:38:53 +08004562/*
Tao Ma80bcaf32008-10-27 06:06:24 +08004563 * Find a suitable position at which to divide a bucket in two.
4564 * We have to make sure the xattrs with the same hash value exist
4565 * in the same bucket.
4566 *
4567 * If this ocfs2_xattr_header covers more than one hash value, find a
4568 * place where the hash value changes. Try to find the most even split.
4569 * The most common case is that all entries have different hash values,
4570 * and the first check we make will find a place to split.
Tao Ma01225592008-08-18 17:38:53 +08004571 */
Tao Ma80bcaf32008-10-27 06:06:24 +08004572static int ocfs2_xattr_find_divide_pos(struct ocfs2_xattr_header *xh)
4573{
4574 struct ocfs2_xattr_entry *entries = xh->xh_entries;
4575 int count = le16_to_cpu(xh->xh_count);
4576 int delta, middle = count / 2;
4577
4578 /*
4579 * We start at the middle. Each step gets farther away in both
4580 * directions. We therefore hit the change in hash value
4581 * nearest to the middle. Note that this loop does not execute for
4582 * count < 2.
4583 */
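	/*
	 * For example, with hashes {5, 5, 7, 7, 9, 9} count is 6 and middle
	 * is 3: delta 0 compares entries 2/3 (equal) and then entries 3/4
	 * (different), so we return 4 and split between the 7s and the 9s.
	 */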
4584 for (delta = 0; delta < middle; delta++) {
4585 /* Let's check delta earlier than middle */
4586 if (cmp_xe(&entries[middle - delta - 1],
4587 &entries[middle - delta]))
4588 return middle - delta;
4589
4590 /* For even counts, don't walk off the end */
4591 if ((middle + delta + 1) == count)
4592 continue;
4593
4594 /* Now try delta past middle */
4595 if (cmp_xe(&entries[middle + delta],
4596 &entries[middle + delta + 1]))
4597 return middle + delta + 1;
4598 }
4599
4600 /* Every entry had the same hash */
4601 return count;
4602}
4603
4604/*
4605 * Move some xattrs in the old bucket (blk) to the new bucket (new_blk).
4606 * first_hash will record the 1st hash of the new bucket.
4607 *
4608 * Normally half of the xattrs will be moved. But we have to make
4609 * sure that the xattrs with the same hash value are stored in the
4610 * same bucket. If all the xattrs in this bucket have the same hash
4611 * value, the new bucket will be initialized as an empty one and the
4612 * first_hash will be initialized as (hash_value+1).
4613 */
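/*
 * For instance, if every entry in the old bucket has name_hash 0x1234,
 * the new bucket is left empty and *first_hash becomes 0x1235, so the
 * hash ordering of the index tree is preserved.
 */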
4614static int ocfs2_divide_xattr_bucket(struct inode *inode,
4615 handle_t *handle,
4616 u64 blk,
4617 u64 new_blk,
4618 u32 *first_hash,
4619 int new_bucket_head)
Tao Ma01225592008-08-18 17:38:53 +08004620{
4621 int ret, i;
Joel Becker199799a2009-08-14 19:04:15 -07004622 int count, start, len, name_value_len = 0, name_offset = 0;
Joel Beckerba937122008-10-24 19:13:20 -07004623 struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;
Tao Ma01225592008-08-18 17:38:53 +08004624 struct ocfs2_xattr_header *xh;
4625 struct ocfs2_xattr_entry *xe;
4626 int blocksize = inode->i_sb->s_blocksize;
4627
Tao Ma402b4182011-02-23 22:01:17 +08004628 trace_ocfs2_divide_xattr_bucket_begin((unsigned long long)blk,
4629 (unsigned long long)new_blk);
Tao Ma01225592008-08-18 17:38:53 +08004630
Joel Beckerba937122008-10-24 19:13:20 -07004631 s_bucket = ocfs2_xattr_bucket_new(inode);
4632 t_bucket = ocfs2_xattr_bucket_new(inode);
4633 if (!s_bucket || !t_bucket) {
4634 ret = -ENOMEM;
4635 mlog_errno(ret);
4636 goto out;
4637 }
Tao Ma01225592008-08-18 17:38:53 +08004638
Joel Beckerba937122008-10-24 19:13:20 -07004639 ret = ocfs2_read_xattr_bucket(s_bucket, blk);
Tao Ma01225592008-08-18 17:38:53 +08004640 if (ret) {
4641 mlog_errno(ret);
4642 goto out;
4643 }
4644
Joel Beckerba937122008-10-24 19:13:20 -07004645 ret = ocfs2_xattr_bucket_journal_access(handle, s_bucket,
Joel Becker1224be02008-10-24 18:47:33 -07004646 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma01225592008-08-18 17:38:53 +08004647 if (ret) {
4648 mlog_errno(ret);
4649 goto out;
4650 }
4651
Joel Becker784b8162008-10-24 17:33:40 -07004652 /*
4653 * Even if !new_bucket_head, we're overwriting t_bucket. Thus,
4654 * there's no need to read it.
4655 */
Wengang Wang9c339252014-04-03 14:47:15 -07004656 ret = ocfs2_init_xattr_bucket(t_bucket, new_blk, new_bucket_head);
Tao Ma01225592008-08-18 17:38:53 +08004657 if (ret) {
4658 mlog_errno(ret);
4659 goto out;
4660 }
4661
Joel Becker2b656c12008-11-25 19:00:15 -08004662 /*
4663 * Hey, if we're overwriting t_bucket, what difference does
4664 * ACCESS_CREATE vs ACCESS_WRITE make? See the comment in the
4665 * same part of ocfs2_cp_xattr_bucket().
4666 */
Joel Beckerba937122008-10-24 19:13:20 -07004667 ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket,
Joel Becker1224be02008-10-24 18:47:33 -07004668 new_bucket_head ?
4669 OCFS2_JOURNAL_ACCESS_CREATE :
4670 OCFS2_JOURNAL_ACCESS_WRITE);
4671 if (ret) {
4672 mlog_errno(ret);
4673 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004674 }
4675
Joel Beckerba937122008-10-24 19:13:20 -07004676 xh = bucket_xh(s_bucket);
Tao Ma80bcaf32008-10-27 06:06:24 +08004677 count = le16_to_cpu(xh->xh_count);
4678 start = ocfs2_xattr_find_divide_pos(xh);
4679
4680 if (start == count) {
4681 xe = &xh->xh_entries[start-1];
4682
4683 /*
4684 * Initialize a new empty bucket here.
4685 * The hash value is set as one larger than
4686 * that of the last entry in the previous bucket.
4687 */
Joel Beckerba937122008-10-24 19:13:20 -07004688 for (i = 0; i < t_bucket->bu_blocks; i++)
4689 memset(bucket_block(t_bucket, i), 0, blocksize);
Tao Ma80bcaf32008-10-27 06:06:24 +08004690
Joel Beckerba937122008-10-24 19:13:20 -07004691 xh = bucket_xh(t_bucket);
Tao Ma80bcaf32008-10-27 06:06:24 +08004692 xh->xh_free_start = cpu_to_le16(blocksize);
4693 xh->xh_entries[0].xe_name_hash = xe->xe_name_hash;
4694 le32_add_cpu(&xh->xh_entries[0].xe_name_hash, 1);
4695
4696 goto set_num_buckets;
4697 }
4698
Tao Ma01225592008-08-18 17:38:53 +08004699 /* copy the whole bucket to the new first. */
Joel Beckerba937122008-10-24 19:13:20 -07004700 ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004701
4702 /* update the new bucket. */
Joel Beckerba937122008-10-24 19:13:20 -07004703 xh = bucket_xh(t_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004704
4705 /*
4706 * Calculate the total name/value len and xh_free_start for
4707 * the old bucket first.
4708 */
4709 name_offset = OCFS2_XATTR_BUCKET_SIZE;
4710 name_value_len = 0;
4711 for (i = 0; i < start; i++) {
4712 xe = &xh->xh_entries[i];
Joel Becker199799a2009-08-14 19:04:15 -07004713 name_value_len += namevalue_size_xe(xe);
Tao Ma01225592008-08-18 17:38:53 +08004714 if (le16_to_cpu(xe->xe_name_offset) < name_offset)
4715 name_offset = le16_to_cpu(xe->xe_name_offset);
4716 }
4717
4718 /*
4719 * Now begin the modification to the new bucket.
4720 *
4721 * In the new bucket, we just move the xattr entries to the beginning
4722 * and don't touch the name/value. So there will be some holes in the
4723 * bucket, and they will be removed when ocfs2_defrag_xattr_bucket is
4724 * called.
4725 */
4726 xe = &xh->xh_entries[start];
4727 len = sizeof(struct ocfs2_xattr_entry) * (count - start);
Tao Ma402b4182011-02-23 22:01:17 +08004728 trace_ocfs2_divide_xattr_bucket_move(len,
4729 (int)((char *)xe - (char *)xh),
4730 (int)((char *)xh->xh_entries - (char *)xh));
Tao Ma01225592008-08-18 17:38:53 +08004731 memmove((char *)xh->xh_entries, (char *)xe, len);
4732 xe = &xh->xh_entries[count - start];
4733 len = sizeof(struct ocfs2_xattr_entry) * start;
4734 memset((char *)xe, 0, len);
4735
4736 le16_add_cpu(&xh->xh_count, -start);
4737 le16_add_cpu(&xh->xh_name_value_len, -name_value_len);
4738
4739 /* Calculate xh_free_start for the new bucket. */
4740 xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
4741 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
4742 xe = &xh->xh_entries[i];
Tao Ma01225592008-08-18 17:38:53 +08004743 if (le16_to_cpu(xe->xe_name_offset) <
4744 le16_to_cpu(xh->xh_free_start))
4745 xh->xh_free_start = xe->xe_name_offset;
4746 }
4747
Tao Ma80bcaf32008-10-27 06:06:24 +08004748set_num_buckets:
Tao Ma01225592008-08-18 17:38:53 +08004749 /* set xh->xh_num_buckets for the new xh. */
4750 if (new_bucket_head)
4751 xh->xh_num_buckets = cpu_to_le16(1);
4752 else
4753 xh->xh_num_buckets = 0;
4754
Joel Beckerba937122008-10-24 19:13:20 -07004755 ocfs2_xattr_bucket_journal_dirty(handle, t_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004756
4757 /* store the first_hash of the new bucket. */
4758 if (first_hash)
4759 *first_hash = le32_to_cpu(xh->xh_entries[0].xe_name_hash);
4760
4761 /*
Tao Ma80bcaf32008-10-27 06:06:24 +08004762 * Now only update the 1st block of the old bucket. If we
4763 * just added a new empty bucket, there is no need to modify
4764 * it.
Tao Ma01225592008-08-18 17:38:53 +08004765 */
Tao Ma80bcaf32008-10-27 06:06:24 +08004766 if (start == count)
4767 goto out;
4768
Joel Beckerba937122008-10-24 19:13:20 -07004769 xh = bucket_xh(s_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004770 memset(&xh->xh_entries[start], 0,
4771 sizeof(struct ocfs2_xattr_entry) * (count - start));
4772 xh->xh_count = cpu_to_le16(start);
4773 xh->xh_free_start = cpu_to_le16(name_offset);
4774 xh->xh_name_value_len = cpu_to_le16(name_value_len);
4775
Joel Beckerba937122008-10-24 19:13:20 -07004776 ocfs2_xattr_bucket_journal_dirty(handle, s_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004777
4778out:
Joel Beckerba937122008-10-24 19:13:20 -07004779 ocfs2_xattr_bucket_free(s_bucket);
4780 ocfs2_xattr_bucket_free(t_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004781
4782 return ret;
4783}
4784
4785/*
4786 * Copy xattr from one bucket to another bucket.
4787 *
4788 * The caller must make sure that the journal transaction
4789 * has enough space for journaling.
4790 */
4791static int ocfs2_cp_xattr_bucket(struct inode *inode,
4792 handle_t *handle,
4793 u64 s_blkno,
4794 u64 t_blkno,
4795 int t_is_new)
4796{
Joel Becker4980c6d2008-10-24 18:54:43 -07004797 int ret;
Joel Beckerba937122008-10-24 19:13:20 -07004798 struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;
Tao Ma01225592008-08-18 17:38:53 +08004799
4800 BUG_ON(s_blkno == t_blkno);
4801
Tao Ma402b4182011-02-23 22:01:17 +08004802 trace_ocfs2_cp_xattr_bucket((unsigned long long)s_blkno,
4803 (unsigned long long)t_blkno,
4804 t_is_new);
Tao Ma01225592008-08-18 17:38:53 +08004805
Joel Beckerba937122008-10-24 19:13:20 -07004806 s_bucket = ocfs2_xattr_bucket_new(inode);
4807 t_bucket = ocfs2_xattr_bucket_new(inode);
4808 if (!s_bucket || !t_bucket) {
4809 ret = -ENOMEM;
4810 mlog_errno(ret);
4811 goto out;
4812 }
Joel Becker92de1092008-11-25 17:06:40 -08004813
Joel Beckerba937122008-10-24 19:13:20 -07004814 ret = ocfs2_read_xattr_bucket(s_bucket, s_blkno);
Tao Ma01225592008-08-18 17:38:53 +08004815 if (ret)
4816 goto out;
4817
Joel Becker784b8162008-10-24 17:33:40 -07004818 /*
4819 * Even if !t_is_new, we're overwriting t_bucket. Thus,
4820 * there's no need to read it.
4821 */
Wengang Wang9c339252014-04-03 14:47:15 -07004822 ret = ocfs2_init_xattr_bucket(t_bucket, t_blkno, t_is_new);
Tao Ma01225592008-08-18 17:38:53 +08004823 if (ret)
4824 goto out;
4825
Joel Becker2b656c12008-11-25 19:00:15 -08004826 /*
4827 * Hey, if we're overwriting t_bucket, what difference does
4828 * ACCESS_CREATE vs ACCESS_WRITE make? Well, if we allocated a new
Joel Becker874d65a2008-11-26 13:02:18 -08004829 * cluster to fill, we came here from
4830 * ocfs2_mv_xattr_buckets(), and it is really new -
4831 * ACCESS_CREATE is required. But we also might have moved data
4832 * out of t_bucket before extending back into it.
4833 * ocfs2_add_new_xattr_bucket() can do this - its call to
4834 * ocfs2_add_new_xattr_cluster() may have created a new extent
Joel Becker2b656c12008-11-25 19:00:15 -08004835 * and copied out the end of the old extent. Then it re-extends
4836 * the old extent back to create space for new xattrs. That's
4837 * how we get here, and the bucket isn't really new.
4838 */
Joel Beckerba937122008-10-24 19:13:20 -07004839 ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket,
Joel Becker1224be02008-10-24 18:47:33 -07004840 t_is_new ?
4841 OCFS2_JOURNAL_ACCESS_CREATE :
4842 OCFS2_JOURNAL_ACCESS_WRITE);
4843 if (ret)
4844 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004845
Joel Beckerba937122008-10-24 19:13:20 -07004846 ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket);
4847 ocfs2_xattr_bucket_journal_dirty(handle, t_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004848
4849out:
Joel Beckerba937122008-10-24 19:13:20 -07004850 ocfs2_xattr_bucket_free(t_bucket);
4851 ocfs2_xattr_bucket_free(s_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004852
4853 return ret;
4854}
4855
4856/*
Joel Becker874d65a2008-11-26 13:02:18 -08004857 * src_blk points to the start of an existing extent. last_blk points to
4858 * the last cluster in that extent. to_blk points to a newly allocated
Joel Becker54ecb6b2008-11-26 13:18:31 -08004859 * extent. We copy the buckets from the cluster at last_blk to the new
4860 * extent. If start_bucket is non-zero, we skip that many buckets before
4861 * we start copying. The new extent's xh_num_buckets gets set to the
4862 * number of buckets we copied. The old extent's xh_num_buckets shrinks
4863 * by the same amount.
Tao Ma01225592008-08-18 17:38:53 +08004864 */
Joel Becker54ecb6b2008-11-26 13:18:31 -08004865static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
4866 u64 src_blk, u64 last_blk, u64 to_blk,
4867 unsigned int start_bucket,
4868 u32 *first_hash)
Tao Ma01225592008-08-18 17:38:53 +08004869{
4870 int i, ret, credits;
4871 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
Joel Becker15d60922008-11-25 18:36:42 -08004872 int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Tao Ma01225592008-08-18 17:38:53 +08004873 int num_buckets = ocfs2_xattr_buckets_per_cluster(osb);
Joel Becker15d60922008-11-25 18:36:42 -08004874 struct ocfs2_xattr_bucket *old_first, *new_first;
Tao Ma01225592008-08-18 17:38:53 +08004875
Tao Ma402b4182011-02-23 22:01:17 +08004876 trace_ocfs2_mv_xattr_buckets((unsigned long long)last_blk,
4877 (unsigned long long)to_blk);
Tao Ma01225592008-08-18 17:38:53 +08004878
Joel Becker54ecb6b2008-11-26 13:18:31 -08004879 BUG_ON(start_bucket >= num_buckets);
4880 if (start_bucket) {
4881 num_buckets -= start_bucket;
4882 last_blk += (start_bucket * blks_per_bucket);
4883 }
4884
Joel Becker15d60922008-11-25 18:36:42 -08004885 /* The first bucket of the original extent */
4886 old_first = ocfs2_xattr_bucket_new(inode);
4887 /* The first bucket of the new extent */
4888 new_first = ocfs2_xattr_bucket_new(inode);
4889 if (!old_first || !new_first) {
4890 ret = -ENOMEM;
4891 mlog_errno(ret);
4892 goto out;
4893 }
4894
Joel Becker874d65a2008-11-26 13:02:18 -08004895 ret = ocfs2_read_xattr_bucket(old_first, src_blk);
Joel Becker15d60922008-11-25 18:36:42 -08004896 if (ret) {
4897 mlog_errno(ret);
4898 goto out;
4899 }
4900
Tao Ma01225592008-08-18 17:38:53 +08004901 /*
Joel Becker54ecb6b2008-11-26 13:18:31 -08004902 * We need to update the first bucket of the old extent and all
4903 * the buckets going to the new extent.
Tao Ma01225592008-08-18 17:38:53 +08004904 */
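	/*
	 * For example, moving 8 buckets of 4 blocks each reserves
	 * (8 + 1) * 4 = 36 block credits: the 8 destination buckets plus
	 * the first bucket of the old extent.
	 */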
Tao Mac901fb02010-04-26 14:34:57 +08004905 credits = ((num_buckets + 1) * blks_per_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004906 ret = ocfs2_extend_trans(handle, credits);
4907 if (ret) {
4908 mlog_errno(ret);
4909 goto out;
4910 }
4911
Joel Becker15d60922008-11-25 18:36:42 -08004912 ret = ocfs2_xattr_bucket_journal_access(handle, old_first,
4913 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma01225592008-08-18 17:38:53 +08004914 if (ret) {
4915 mlog_errno(ret);
4916 goto out;
4917 }
4918
4919 for (i = 0; i < num_buckets; i++) {
4920 ret = ocfs2_cp_xattr_bucket(inode, handle,
Joel Becker874d65a2008-11-26 13:02:18 -08004921 last_blk + (i * blks_per_bucket),
Joel Becker15d60922008-11-25 18:36:42 -08004922 to_blk + (i * blks_per_bucket),
4923 1);
Tao Ma01225592008-08-18 17:38:53 +08004924 if (ret) {
4925 mlog_errno(ret);
4926 goto out;
4927 }
Tao Ma01225592008-08-18 17:38:53 +08004928 }
4929
Joel Becker15d60922008-11-25 18:36:42 -08004930 /*
4931 * Get the new bucket ready before we dirty anything
4932 * (This actually shouldn't fail, because we already dirtied
4933 * it once in ocfs2_cp_xattr_bucket()).
4934 */
4935 ret = ocfs2_read_xattr_bucket(new_first, to_blk);
4936 if (ret) {
Tao Ma01225592008-08-18 17:38:53 +08004937 mlog_errno(ret);
4938 goto out;
4939 }
Joel Becker15d60922008-11-25 18:36:42 -08004940 ret = ocfs2_xattr_bucket_journal_access(handle, new_first,
4941 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma01225592008-08-18 17:38:53 +08004942 if (ret) {
4943 mlog_errno(ret);
4944 goto out;
4945 }
4946
Joel Becker15d60922008-11-25 18:36:42 -08004947 /* Now update the headers */
4948 le16_add_cpu(&bucket_xh(old_first)->xh_num_buckets, -num_buckets);
4949 ocfs2_xattr_bucket_journal_dirty(handle, old_first);
Tao Ma01225592008-08-18 17:38:53 +08004950
Joel Becker15d60922008-11-25 18:36:42 -08004951 bucket_xh(new_first)->xh_num_buckets = cpu_to_le16(num_buckets);
4952 ocfs2_xattr_bucket_journal_dirty(handle, new_first);
Tao Ma01225592008-08-18 17:38:53 +08004953
4954 if (first_hash)
Joel Becker15d60922008-11-25 18:36:42 -08004955 *first_hash = le32_to_cpu(bucket_xh(new_first)->xh_entries[0].xe_name_hash);
4956
Tao Ma01225592008-08-18 17:38:53 +08004957out:
Joel Becker15d60922008-11-25 18:36:42 -08004958 ocfs2_xattr_bucket_free(new_first);
4959 ocfs2_xattr_bucket_free(old_first);
Tao Ma01225592008-08-18 17:38:53 +08004960 return ret;
4961}
4962
4963/*
Tao Ma80bcaf32008-10-27 06:06:24 +08004964 * Move some xattrs in this cluster to the new cluster.
Tao Ma01225592008-08-18 17:38:53 +08004965 * This function should only be called when bucket size == cluster size.
4966 * Otherwise ocfs2_mv_xattr_bucket_cross_cluster should be used instead.
4967 */
Tao Ma80bcaf32008-10-27 06:06:24 +08004968static int ocfs2_divide_xattr_cluster(struct inode *inode,
4969 handle_t *handle,
4970 u64 prev_blk,
4971 u64 new_blk,
4972 u32 *first_hash)
Tao Ma01225592008-08-18 17:38:53 +08004973{
4974 u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Tao Mac901fb02010-04-26 14:34:57 +08004975 int ret, credits = 2 * blk_per_bucket;
Tao Ma01225592008-08-18 17:38:53 +08004976
4977 BUG_ON(OCFS2_XATTR_BUCKET_SIZE < OCFS2_SB(inode->i_sb)->s_clustersize);
4978
4979 ret = ocfs2_extend_trans(handle, credits);
4980 if (ret) {
4981 mlog_errno(ret);
4982 return ret;
4983 }
4984
4985	/* Move half of the xattrs in prev_blk to the new bucket at new_blk. */
Tao Ma80bcaf32008-10-27 06:06:24 +08004986 return ocfs2_divide_xattr_bucket(inode, handle, prev_blk,
4987 new_blk, first_hash, 1);
Tao Ma01225592008-08-18 17:38:53 +08004988}
4989
4990/*
4991 * Move some xattrs from the old cluster to the new one since they are not
4992 * contiguous in ocfs2 xattr tree.
4993 *
4994 * new_blk starts a new separate cluster, and we will move some xattrs from
4995 * prev_blk to it. v_start will be set as the first name hash value in this
4996 * new cluster so that it can be used as e_cpos during tree insertion and
4997 * won't collide with our original b-tree operations. first_bh and header_bh
4998 * will also be updated since they will be used in ocfs2_extend_xattr_bucket
4999 * to extend the insert bucket.
5000 *
5001 * The problem is: how many xattrs should we move to the new one, and when
5002 * should we update first_bh and header_bh?
5003 * 1. If cluster size > bucket size, that means the previous cluster has more
5004 * than 1 bucket, so just move half of the buckets into the new cluster and
5005 * update the first_bh and header_bh if the insert bucket has been moved
5006 * to the new cluster.
5007 * 2. If cluster_size == bucket_size:
5008 * a) If the previous extent rec has more than one cluster and the insert
5009 * place isn't in the last cluster, copy the entire last cluster to the
5010 * new one. This time, we don't need to update the first_bh and header_bh
5011 * since they will not be moved into the new cluster.
5012 * b) Otherwise, move the bottom half of the xattrs in the last cluster into
5013 * the new one. And we set the extend flag to zero if the insert place is
5014 * moved into the new allocated cluster since no extend is needed.
5015 */
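/*
 * As an example of case 2a above: with cluster size == bucket size and an
 * existing extent of 3 clusters, if the insert target is not in the last
 * cluster, the entire last cluster is copied to new_blk and the target
 * bucket stays where it is (so *extend remains set).
 */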
5016static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode,
5017 handle_t *handle,
Joel Becker012ee912008-11-26 14:43:31 -08005018 struct ocfs2_xattr_bucket *first,
5019 struct ocfs2_xattr_bucket *target,
Tao Ma01225592008-08-18 17:38:53 +08005020 u64 new_blk,
Tao Ma01225592008-08-18 17:38:53 +08005021 u32 prev_clusters,
5022 u32 *v_start,
5023 int *extend)
5024{
Joel Becker92cf3ad2008-11-26 14:12:09 -08005025 int ret;
Tao Ma01225592008-08-18 17:38:53 +08005026
Tao Ma402b4182011-02-23 22:01:17 +08005027 trace_ocfs2_adjust_xattr_cross_cluster(
5028 (unsigned long long)bucket_blkno(first),
5029 (unsigned long long)new_blk, prev_clusters);
Tao Ma01225592008-08-18 17:38:53 +08005030
Joel Becker41cb8142008-11-26 14:25:21 -08005031 if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) {
Tao Ma01225592008-08-18 17:38:53 +08005032 ret = ocfs2_mv_xattr_bucket_cross_cluster(inode,
5033 handle,
Joel Becker41cb8142008-11-26 14:25:21 -08005034 first, target,
Tao Ma01225592008-08-18 17:38:53 +08005035 new_blk,
Tao Ma01225592008-08-18 17:38:53 +08005036 prev_clusters,
5037 v_start);
Joel Becker012ee912008-11-26 14:43:31 -08005038 if (ret)
Joel Becker41cb8142008-11-26 14:25:21 -08005039 mlog_errno(ret);
Joel Becker41cb8142008-11-26 14:25:21 -08005040 } else {
Joel Becker92cf3ad2008-11-26 14:12:09 -08005041 /* The start of the last cluster in the first extent */
5042 u64 last_blk = bucket_blkno(first) +
5043 ((prev_clusters - 1) *
5044 ocfs2_clusters_to_blocks(inode->i_sb, 1));
Tao Ma01225592008-08-18 17:38:53 +08005045
Joel Becker012ee912008-11-26 14:43:31 -08005046 if (prev_clusters > 1 && bucket_blkno(target) != last_blk) {
Joel Becker874d65a2008-11-26 13:02:18 -08005047 ret = ocfs2_mv_xattr_buckets(inode, handle,
Joel Becker92cf3ad2008-11-26 14:12:09 -08005048 bucket_blkno(first),
Joel Becker54ecb6b2008-11-26 13:18:31 -08005049 last_blk, new_blk, 0,
Tao Ma01225592008-08-18 17:38:53 +08005050 v_start);
Joel Becker012ee912008-11-26 14:43:31 -08005051 if (ret)
5052 mlog_errno(ret);
5053 } else {
Tao Ma80bcaf32008-10-27 06:06:24 +08005054 ret = ocfs2_divide_xattr_cluster(inode, handle,
5055 last_blk, new_blk,
5056 v_start);
Joel Becker012ee912008-11-26 14:43:31 -08005057 if (ret)
5058 mlog_errno(ret);
Tao Ma01225592008-08-18 17:38:53 +08005059
Joel Becker92cf3ad2008-11-26 14:12:09 -08005060 if ((bucket_blkno(target) == last_blk) && extend)
Tao Ma01225592008-08-18 17:38:53 +08005061 *extend = 0;
5062 }
5063 }
5064
5065 return ret;
5066}
5067
5068/*
5069 * Add a new cluster for xattr storage.
5070 *
5071 * If the new cluster is contiguous with the previous one, it will be
5072 * appended to the same extent record, and num_clusters will be updated.
5073 * If not, we will insert a new extent for it and move some xattrs in
5074 * the last cluster into the new allocated one.
5075 * We also need to limit the maximum size of a btree leaf, otherwise we'll
5076 * lose the benefits of hashing because we'll have to search large leaves.
5077 * So now the maximum size is OCFS2_MAX_XATTR_TREE_LEAF_SIZE (or clustersize,
5078 * if it's bigger).
5079 *
5080 * first_bh is the first block of the previous extent rec and header_bh
5081 * indicates the bucket into which we will insert the new xattrs. They will be updated
5082 * when the header_bh is moved into the new cluster.
5083 */
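/*
 * For example, assuming a 4KB cluster size and a 64KB
 * OCFS2_MAX_XATTR_TREE_LEAF_SIZE, an extent record may grow to 16
 * contiguous clusters before a new cluster is forced into its own
 * extent record (and some xattrs are moved over to it).
 */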
5084static int ocfs2_add_new_xattr_cluster(struct inode *inode,
5085 struct buffer_head *root_bh,
Joel Beckered29c0c2008-11-26 15:08:44 -08005086 struct ocfs2_xattr_bucket *first,
5087 struct ocfs2_xattr_bucket *target,
Tao Ma01225592008-08-18 17:38:53 +08005088 u32 *num_clusters,
5089 u32 prev_cpos,
Tao Ma78f30c32008-11-12 08:27:00 +08005090 int *extend,
5091 struct ocfs2_xattr_set_ctxt *ctxt)
Tao Ma01225592008-08-18 17:38:53 +08005092{
Tao Ma85db90e2008-11-12 08:27:01 +08005093 int ret;
Tao Ma01225592008-08-18 17:38:53 +08005094 u16 bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
5095 u32 prev_clusters = *num_clusters;
5096 u32 clusters_to_add = 1, bit_off, num_bits, v_start = 0;
5097 u64 block;
Tao Ma85db90e2008-11-12 08:27:01 +08005098 handle_t *handle = ctxt->handle;
Tao Ma01225592008-08-18 17:38:53 +08005099 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
Joel Beckerf99b9b72008-08-20 19:36:33 -07005100 struct ocfs2_extent_tree et;
Tao Ma01225592008-08-18 17:38:53 +08005101
Tao Ma402b4182011-02-23 22:01:17 +08005102 trace_ocfs2_add_new_xattr_cluster_begin(
5103 (unsigned long long)OCFS2_I(inode)->ip_blkno,
5104 (unsigned long long)bucket_blkno(first),
5105 prev_cpos, prev_clusters);
Tao Ma01225592008-08-18 17:38:53 +08005106
Joel Becker5e404e92009-02-13 03:54:22 -08005107 ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);
Joel Beckerf99b9b72008-08-20 19:36:33 -07005108
Joel Becker0cf2f762009-02-12 16:41:25 -08005109 ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh,
Joel Becker84008972008-12-09 16:11:49 -08005110 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma01225592008-08-18 17:38:53 +08005111 if (ret < 0) {
5112 mlog_errno(ret);
5113 goto leave;
5114 }
5115
Joel Becker1ed9b772010-05-06 13:59:06 +08005116 ret = __ocfs2_claim_clusters(handle, ctxt->data_ac, 1,
Tao Ma01225592008-08-18 17:38:53 +08005117 clusters_to_add, &bit_off, &num_bits);
5118 if (ret < 0) {
5119 if (ret != -ENOSPC)
5120 mlog_errno(ret);
5121 goto leave;
5122 }
5123
5124 BUG_ON(num_bits > clusters_to_add);
5125
5126 block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
Tao Ma402b4182011-02-23 22:01:17 +08005127 trace_ocfs2_add_new_xattr_cluster((unsigned long long)block, num_bits);
Tao Ma01225592008-08-18 17:38:53 +08005128
Joel Beckered29c0c2008-11-26 15:08:44 -08005129 if (bucket_blkno(first) + (prev_clusters * bpc) == block &&
Tao Ma01225592008-08-18 17:38:53 +08005130 (prev_clusters + num_bits) << osb->s_clustersize_bits <=
5131 OCFS2_MAX_XATTR_TREE_LEAF_SIZE) {
5132 /*
5133 * If this cluster is contiguous with the old one and
5134 * adding it does not push us past the limit of
5135 * OCFS2_MAX_XATTR_TREE_LEAF_SIZE, great. We will let it be
5136 * initialized and used like the other buckets in the previous
5137 * cluster.
5138 * So add it as a contiguous one. The caller will handle
5139 * its init process.
5140 */
5141 v_start = prev_cpos + prev_clusters;
5142 *num_clusters = prev_clusters + num_bits;
Tao Ma01225592008-08-18 17:38:53 +08005143 } else {
5144 ret = ocfs2_adjust_xattr_cross_cluster(inode,
5145 handle,
Joel Becker012ee912008-11-26 14:43:31 -08005146 first,
5147 target,
Tao Ma01225592008-08-18 17:38:53 +08005148 block,
Tao Ma01225592008-08-18 17:38:53 +08005149 prev_clusters,
5150 &v_start,
5151 extend);
5152 if (ret) {
5153 mlog_errno(ret);
5154 goto leave;
5155 }
5156 }
5157
Tao Ma402b4182011-02-23 22:01:17 +08005158 trace_ocfs2_add_new_xattr_cluster_insert((unsigned long long)block,
5159 v_start, num_bits);
Joel Beckercc79d8c2009-02-13 03:24:43 -08005160 ret = ocfs2_insert_extent(handle, &et, v_start, block,
Tao Ma78f30c32008-11-12 08:27:00 +08005161 num_bits, 0, ctxt->meta_ac);
Tao Ma01225592008-08-18 17:38:53 +08005162 if (ret < 0) {
5163 mlog_errno(ret);
5164 goto leave;
5165 }
5166
Joel Beckerec20cec2010-03-19 14:13:52 -07005167 ocfs2_journal_dirty(handle, root_bh);
Tao Ma01225592008-08-18 17:38:53 +08005168
5169leave:
Tao Ma01225592008-08-18 17:38:53 +08005170 return ret;
5171}
5172
5173/*
Joel Becker92de1092008-11-25 17:06:40 -08005174 * We are given an extent. 'first' is the bucket at the very front of
5175 * the extent. The extent has space for an additional bucket past
5176 * bucket_xh(first)->xh_num_buckets. 'target_blkno' is the block number
5177 * of the target bucket. We wish to shift every bucket past the target
5178 * down one, filling in that additional space. When we get back to the
5179 * target, we split the target between itself and the now-empty bucket
5180 * at target+1 (aka, target_blkno + blks_per_bucket).
Tao Ma01225592008-08-18 17:38:53 +08005181 */
5182static int ocfs2_extend_xattr_bucket(struct inode *inode,
Tao Ma85db90e2008-11-12 08:27:01 +08005183 handle_t *handle,
Joel Becker92de1092008-11-25 17:06:40 -08005184 struct ocfs2_xattr_bucket *first,
5185 u64 target_blk,
Tao Ma01225592008-08-18 17:38:53 +08005186 u32 num_clusters)
5187{
5188 int ret, credits;
5189 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
5190 u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Joel Becker92de1092008-11-25 17:06:40 -08005191 u64 end_blk;
5192 u16 new_bucket = le16_to_cpu(bucket_xh(first)->xh_num_buckets);
Tao Ma01225592008-08-18 17:38:53 +08005193
Tao Ma402b4182011-02-23 22:01:17 +08005194 trace_ocfs2_extend_xattr_bucket((unsigned long long)target_blk,
5195 (unsigned long long)bucket_blkno(first),
5196 num_clusters, new_bucket);
Tao Ma01225592008-08-18 17:38:53 +08005197
Joel Becker92de1092008-11-25 17:06:40 -08005198 /* The extent must have room for an additional bucket */
5199 BUG_ON(new_bucket >=
5200 (num_clusters * ocfs2_xattr_buckets_per_cluster(osb)));
Tao Ma01225592008-08-18 17:38:53 +08005201
Joel Becker92de1092008-11-25 17:06:40 -08005202 /* end_blk points to the last existing bucket */
5203 end_blk = bucket_blkno(first) + ((new_bucket - 1) * blk_per_bucket);
Tao Ma01225592008-08-18 17:38:53 +08005204
5205 /*
Joel Becker92de1092008-11-25 17:06:40 -08005206 * end_blk is the start of the last existing bucket.
5207 * Thus, (end_blk - target_blk) covers the target bucket and
5208 * every bucket after it up to, but not including, the last
5209 * existing bucket. Then we add the last existing bucket, the
5210 * new bucket, and the first bucket (3 * blk_per_bucket).
Tao Ma01225592008-08-18 17:38:53 +08005211 */
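	/*
	 * For instance, with 4 blocks per bucket and end_blk - target_blk
	 * spanning 3 buckets, this reserves 3 * 4 + 3 * 4 = 24 block
	 * credits.
	 */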
Tao Mac901fb02010-04-26 14:34:57 +08005212 credits = (end_blk - target_blk) + (3 * blk_per_bucket);
Tao Ma85db90e2008-11-12 08:27:01 +08005213 ret = ocfs2_extend_trans(handle, credits);
5214 if (ret) {
Tao Ma01225592008-08-18 17:38:53 +08005215 mlog_errno(ret);
5216 goto out;
5217 }
5218
Joel Becker92de1092008-11-25 17:06:40 -08005219 ret = ocfs2_xattr_bucket_journal_access(handle, first,
5220 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma01225592008-08-18 17:38:53 +08005221 if (ret) {
5222 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +08005223 goto out;
Tao Ma01225592008-08-18 17:38:53 +08005224 }
5225
Joel Becker92de1092008-11-25 17:06:40 -08005226 while (end_blk != target_blk) {
Tao Ma01225592008-08-18 17:38:53 +08005227 ret = ocfs2_cp_xattr_bucket(inode, handle, end_blk,
5228 end_blk + blk_per_bucket, 0);
5229 if (ret)
Tao Ma85db90e2008-11-12 08:27:01 +08005230 goto out;
Tao Ma01225592008-08-18 17:38:53 +08005231 end_blk -= blk_per_bucket;
5232 }
5233
Joel Becker92de1092008-11-25 17:06:40 -08005234	/* Move half of the xattrs in target_blk to the next bucket. */
5235 ret = ocfs2_divide_xattr_bucket(inode, handle, target_blk,
5236 target_blk + blk_per_bucket, NULL, 0);
Tao Ma01225592008-08-18 17:38:53 +08005237
Joel Becker92de1092008-11-25 17:06:40 -08005238 le16_add_cpu(&bucket_xh(first)->xh_num_buckets, 1);
5239 ocfs2_xattr_bucket_journal_dirty(handle, first);
Tao Ma01225592008-08-18 17:38:53 +08005240
Tao Ma01225592008-08-18 17:38:53 +08005241out:
5242 return ret;
5243}
5244
5245/*
Joel Becker91f20332008-11-26 15:25:41 -08005246 * Add new xattr bucket in an extent record and adjust the buckets
5247 * accordingly. xb_bh is the ocfs2_xattr_block, and target is the
5248 * bucket we want to insert into.
Tao Ma01225592008-08-18 17:38:53 +08005249 *
Joel Becker91f20332008-11-26 15:25:41 -08005250 * In the easy case, we will move all the buckets after target down by
5251 * one. Half of target's xattrs will be moved to the next bucket.
5252 *
5253 * If current cluster is full, we'll allocate a new one. This may not
5254 * be contiguous. The underlying calls will make sure that there is
5255 * space for the insert, shifting buckets around if necessary.
5256 * 'target' may be moved by those calls.
Tao Ma01225592008-08-18 17:38:53 +08005257 */
5258static int ocfs2_add_new_xattr_bucket(struct inode *inode,
5259 struct buffer_head *xb_bh,
Joel Becker91f20332008-11-26 15:25:41 -08005260 struct ocfs2_xattr_bucket *target,
Tao Ma78f30c32008-11-12 08:27:00 +08005261 struct ocfs2_xattr_set_ctxt *ctxt)
Tao Ma01225592008-08-18 17:38:53 +08005262{
Tao Ma01225592008-08-18 17:38:53 +08005263 struct ocfs2_xattr_block *xb =
5264 (struct ocfs2_xattr_block *)xb_bh->b_data;
5265 struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root;
5266 struct ocfs2_extent_list *el = &xb_root->xt_list;
Joel Becker91f20332008-11-26 15:25:41 -08005267 u32 name_hash =
5268 le32_to_cpu(bucket_xh(target)->xh_entries[0].xe_name_hash);
Joel Beckered29c0c2008-11-26 15:08:44 -08005269 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
Tao Ma01225592008-08-18 17:38:53 +08005270 int ret, num_buckets, extend = 1;
5271 u64 p_blkno;
5272 u32 e_cpos, num_clusters;
Joel Becker92de1092008-11-25 17:06:40 -08005273 /* The bucket at the front of the extent */
Joel Becker91f20332008-11-26 15:25:41 -08005274 struct ocfs2_xattr_bucket *first;
Tao Ma01225592008-08-18 17:38:53 +08005275
Tao Ma402b4182011-02-23 22:01:17 +08005276 trace_ocfs2_add_new_xattr_bucket(
5277 (unsigned long long)bucket_blkno(target));
Tao Ma01225592008-08-18 17:38:53 +08005278
Joel Beckered29c0c2008-11-26 15:08:44 -08005279 /* The first bucket of the original extent */
Joel Becker92de1092008-11-25 17:06:40 -08005280 first = ocfs2_xattr_bucket_new(inode);
Joel Becker91f20332008-11-26 15:25:41 -08005281 if (!first) {
Joel Becker92de1092008-11-25 17:06:40 -08005282 ret = -ENOMEM;
5283 mlog_errno(ret);
5284 goto out;
5285 }
5286
Tao Ma01225592008-08-18 17:38:53 +08005287 ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &e_cpos,
5288 &num_clusters, el);
5289 if (ret) {
5290 mlog_errno(ret);
5291 goto out;
5292 }
5293
Joel Beckered29c0c2008-11-26 15:08:44 -08005294 ret = ocfs2_read_xattr_bucket(first, p_blkno);
5295 if (ret) {
5296 mlog_errno(ret);
5297 goto out;
5298 }
5299
Tao Ma01225592008-08-18 17:38:53 +08005300 num_buckets = ocfs2_xattr_buckets_per_cluster(osb) * num_clusters;
Joel Beckered29c0c2008-11-26 15:08:44 -08005301 if (num_buckets == le16_to_cpu(bucket_xh(first)->xh_num_buckets)) {
5302 /*
5303 * This can move first+target if the target bucket moves
5304 * to the new extent.
5305 */
Tao Ma01225592008-08-18 17:38:53 +08005306 ret = ocfs2_add_new_xattr_cluster(inode,
5307 xb_bh,
Joel Beckered29c0c2008-11-26 15:08:44 -08005308 first,
5309 target,
Tao Ma01225592008-08-18 17:38:53 +08005310 &num_clusters,
5311 e_cpos,
Tao Ma78f30c32008-11-12 08:27:00 +08005312 &extend,
5313 ctxt);
Tao Ma01225592008-08-18 17:38:53 +08005314 if (ret) {
5315 mlog_errno(ret);
5316 goto out;
5317 }
5318 }
5319
Joel Becker92de1092008-11-25 17:06:40 -08005320 if (extend) {
Tao Ma01225592008-08-18 17:38:53 +08005321 ret = ocfs2_extend_xattr_bucket(inode,
Tao Ma85db90e2008-11-12 08:27:01 +08005322 ctxt->handle,
Joel Beckered29c0c2008-11-26 15:08:44 -08005323 first,
5324 bucket_blkno(target),
Tao Ma01225592008-08-18 17:38:53 +08005325 num_clusters);
Joel Becker92de1092008-11-25 17:06:40 -08005326 if (ret)
5327 mlog_errno(ret);
5328 }
5329
Tao Ma01225592008-08-18 17:38:53 +08005330out:
Joel Becker92de1092008-11-25 17:06:40 -08005331 ocfs2_xattr_bucket_free(first);
Joel Beckered29c0c2008-11-26 15:08:44 -08005332
Tao Ma01225592008-08-18 17:38:53 +08005333 return ret;
5334}
5335
Tao Ma01225592008-08-18 17:38:53 +08005336/*
Tao Ma01225592008-08-18 17:38:53 +08005337 * Truncate the specified xe_off entry in xattr bucket.
5338 * The bucket is passed in directly and len is the new length.
5339 * Both the ocfs2_xattr_value_root and the entry will be updated here.
5340 *
5341 * The value root lives in one of the bucket's blocks and is updated in place.
5342 */
5343static int ocfs2_xattr_bucket_value_truncate(struct inode *inode,
Joel Becker548b0f22008-11-24 19:32:13 -08005344 struct ocfs2_xattr_bucket *bucket,
Tao Ma01225592008-08-18 17:38:53 +08005345 int xe_off,
Tao Ma78f30c32008-11-12 08:27:00 +08005346 int len,
5347 struct ocfs2_xattr_set_ctxt *ctxt)
Tao Ma01225592008-08-18 17:38:53 +08005348{
5349 int ret, offset;
5350 u64 value_blk;
Tao Ma01225592008-08-18 17:38:53 +08005351 struct ocfs2_xattr_entry *xe;
Joel Becker548b0f22008-11-24 19:32:13 -08005352 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
Tao Ma01225592008-08-18 17:38:53 +08005353 size_t blocksize = inode->i_sb->s_blocksize;
Joel Beckerb3e5d372008-12-09 15:01:04 -08005354 struct ocfs2_xattr_value_buf vb = {
5355 .vb_access = ocfs2_journal_access,
5356 };
Tao Ma01225592008-08-18 17:38:53 +08005357
5358 xe = &xh->xh_entries[xe_off];
5359
5360 BUG_ON(!xe || ocfs2_xattr_is_local(xe));
5361
5362 offset = le16_to_cpu(xe->xe_name_offset) +
5363 OCFS2_XATTR_SIZE(xe->xe_name_len);
5364
5365 value_blk = offset / blocksize;
5366
5367	/* We don't allow the ocfs2_xattr_value root to span more than one block. */
5368 BUG_ON(value_blk != (offset + OCFS2_XATTR_ROOT_SIZE - 1) / blocksize);
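	/*
	 * For instance, with a 1KB block size, a name/value pair whose
	 * padded name ends at offset 2096 into the bucket has its value
	 * root in bu_bhs[2], at offset 48 within that block.
	 */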
Tao Ma01225592008-08-18 17:38:53 +08005369
Joel Beckerb3e5d372008-12-09 15:01:04 -08005370 vb.vb_bh = bucket->bu_bhs[value_blk];
5371 BUG_ON(!vb.vb_bh);
Tao Ma01225592008-08-18 17:38:53 +08005372
Joel Beckerb3e5d372008-12-09 15:01:04 -08005373 vb.vb_xv = (struct ocfs2_xattr_value_root *)
5374 (vb.vb_bh->b_data + offset % blocksize);
Tao Ma01225592008-08-18 17:38:53 +08005375
Joel Becker548b0f22008-11-24 19:32:13 -08005376 /*
5377 * From here on out we have to dirty the bucket. The generic
5378 * value calls only modify one of the bucket's bhs, but we need
5379 * to send the bucket at once. So if they error, they *could* have
5380 * modified something. We have to assume they did, and dirty
5381 * the whole bucket. This leaves us in a consistent state.
5382 */
Tao Ma402b4182011-02-23 22:01:17 +08005383 trace_ocfs2_xattr_bucket_value_truncate(
5384 (unsigned long long)bucket_blkno(bucket), xe_off, len);
Joel Beckerb3e5d372008-12-09 15:01:04 -08005385 ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt);
Tao Ma01225592008-08-18 17:38:53 +08005386 if (ret) {
5387 mlog_errno(ret);
Tao Ma554e7f92009-01-08 08:21:43 +08005388 goto out;
5389 }
5390
5391 ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket,
5392 OCFS2_JOURNAL_ACCESS_WRITE);
5393 if (ret) {
5394 mlog_errno(ret);
5395 goto out;
Tao Ma01225592008-08-18 17:38:53 +08005396 }
5397
Joel Becker548b0f22008-11-24 19:32:13 -08005398 xe->xe_value_size = cpu_to_le64(len);
5399
Joel Becker548b0f22008-11-24 19:32:13 -08005400 ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket);
Tao Ma01225592008-08-18 17:38:53 +08005401
5402out:
Tao Ma01225592008-08-18 17:38:53 +08005403 return ret;
5404}
5405
Tao Ma01225592008-08-18 17:38:53 +08005406static int ocfs2_rm_xattr_cluster(struct inode *inode,
5407 struct buffer_head *root_bh,
5408 u64 blkno,
5409 u32 cpos,
Tao Ma47bca492009-08-18 11:43:42 +08005410 u32 len,
5411 void *para)
Tao Ma01225592008-08-18 17:38:53 +08005412{
5413 int ret;
5414 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
5415 struct inode *tl_inode = osb->osb_tl_inode;
5416 handle_t *handle;
5417 struct ocfs2_xattr_block *xb =
5418 (struct ocfs2_xattr_block *)root_bh->b_data;
Tao Ma01225592008-08-18 17:38:53 +08005419 struct ocfs2_alloc_context *meta_ac = NULL;
5420 struct ocfs2_cached_dealloc_ctxt dealloc;
Joel Beckerf99b9b72008-08-20 19:36:33 -07005421 struct ocfs2_extent_tree et;
5422
Tao Ma47bca492009-08-18 11:43:42 +08005423 ret = ocfs2_iterate_xattr_buckets(inode, blkno, len,
Tao Mace9c5a52009-08-18 11:43:59 +08005424 ocfs2_delete_xattr_in_bucket, para);
Tao Ma47bca492009-08-18 11:43:42 +08005425 if (ret) {
5426 mlog_errno(ret);
5427 return ret;
5428 }
5429
Joel Becker5e404e92009-02-13 03:54:22 -08005430 ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);
Tao Ma01225592008-08-18 17:38:53 +08005431
5432 ocfs2_init_dealloc_ctxt(&dealloc);
5433
Tao Ma402b4182011-02-23 22:01:17 +08005434 trace_ocfs2_rm_xattr_cluster(
5435 (unsigned long long)OCFS2_I(inode)->ip_blkno,
5436 (unsigned long long)blkno, cpos, len);
Tao Ma01225592008-08-18 17:38:53 +08005437
Joel Becker8cb471e2009-02-10 20:00:41 -08005438 ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno,
5439 len);
Tao Ma01225592008-08-18 17:38:53 +08005440
Joel Beckerf99b9b72008-08-20 19:36:33 -07005441 ret = ocfs2_lock_allocators(inode, &et, 0, 1, NULL, &meta_ac);
Tao Ma01225592008-08-18 17:38:53 +08005442 if (ret) {
5443 mlog_errno(ret);
5444 return ret;
5445 }
5446
5447 mutex_lock(&tl_inode->i_mutex);
5448
5449 if (ocfs2_truncate_log_needs_flush(osb)) {
5450 ret = __ocfs2_flush_truncate_log(osb);
5451 if (ret < 0) {
5452 mlog_errno(ret);
5453 goto out;
5454 }
5455 }
5456
Jan Karaa90714c2008-10-09 19:38:40 +02005457 handle = ocfs2_start_trans(osb, ocfs2_remove_extent_credits(osb->sb));
Tao Mad3264792008-10-24 07:57:28 +08005458 if (IS_ERR(handle)) {
Tao Ma01225592008-08-18 17:38:53 +08005459 ret = -ENOMEM;
5460 mlog_errno(ret);
5461 goto out;
5462 }
5463
Joel Becker0cf2f762009-02-12 16:41:25 -08005464 ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh,
Joel Becker84008972008-12-09 16:11:49 -08005465 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma01225592008-08-18 17:38:53 +08005466 if (ret) {
5467 mlog_errno(ret);
5468 goto out_commit;
5469 }
5470
Joel Beckerdbdcf6a2009-02-13 03:41:26 -08005471 ret = ocfs2_remove_extent(handle, &et, cpos, len, meta_ac,
Joel Beckerf99b9b72008-08-20 19:36:33 -07005472 &dealloc);
Tao Ma01225592008-08-18 17:38:53 +08005473 if (ret) {
5474 mlog_errno(ret);
5475 goto out_commit;
5476 }
5477
5478 le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, -len);
Joel Beckerec20cec2010-03-19 14:13:52 -07005479 ocfs2_journal_dirty(handle, root_bh);
Tao Ma01225592008-08-18 17:38:53 +08005480
5481 ret = ocfs2_truncate_log_append(osb, handle, blkno, len);
5482 if (ret)
5483 mlog_errno(ret);
Darrick J. Wong6fdb7022014-04-03 14:47:08 -07005484 ocfs2_update_inode_fsync_trans(handle, inode, 0);
Tao Ma01225592008-08-18 17:38:53 +08005485
5486out_commit:
5487 ocfs2_commit_trans(osb, handle);
5488out:
5489 ocfs2_schedule_truncate_log_flush(osb, 1);
5490
5491 mutex_unlock(&tl_inode->i_mutex);
5492
5493 if (meta_ac)
5494 ocfs2_free_alloc_context(meta_ac);
5495
5496 ocfs2_run_deallocs(osb, &dealloc);
5497
5498 return ret;
5499}
5500
Tao Ma01225592008-08-18 17:38:53 +08005501/*
Tao Ma80bcaf32008-10-27 06:06:24 +08005502 * Check whether the xattr bucket is filled up with the same hash value.
5503 * If we want to insert an xattr with the same hash, return -ENOSPC.
5504 * If we want to insert an xattr with a different hash value, go ahead
5505 * and ocfs2_divide_xattr_bucket will handle this.
5506 */
Tao Ma01225592008-08-18 17:38:53 +08005507static int ocfs2_check_xattr_bucket_collision(struct inode *inode,
Tao Ma80bcaf32008-10-27 06:06:24 +08005508 struct ocfs2_xattr_bucket *bucket,
5509 const char *name)
Tao Ma01225592008-08-18 17:38:53 +08005510{
Joel Becker3e632942008-10-24 17:04:49 -07005511 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
Tao Ma80bcaf32008-10-27 06:06:24 +08005512 u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name));
5513
5514 if (name_hash != le32_to_cpu(xh->xh_entries[0].xe_name_hash))
5515 return 0;
Tao Ma01225592008-08-18 17:38:53 +08005516
5517 if (xh->xh_entries[le16_to_cpu(xh->xh_count) - 1].xe_name_hash ==
5518 xh->xh_entries[0].xe_name_hash) {
5519		mlog(ML_ERROR, "Too many hash collisions in xattr bucket %llu, "
5520 "hash = %u\n",
Joel Becker9c7759a2008-10-24 16:21:03 -07005521 (unsigned long long)bucket_blkno(bucket),
Tao Ma01225592008-08-18 17:38:53 +08005522 le32_to_cpu(xh->xh_entries[0].xe_name_hash));
5523 return -ENOSPC;
5524 }
5525
5526 return 0;
5527}
5528
Joel Beckerc5d95df2009-08-18 21:03:24 -07005529/*
5530 * Try to set the entry in the current bucket. If we fail, the caller
5531 * will handle getting us another bucket.
5532 */
5533static int ocfs2_xattr_set_entry_bucket(struct inode *inode,
5534 struct ocfs2_xattr_info *xi,
5535 struct ocfs2_xattr_search *xs,
5536 struct ocfs2_xattr_set_ctxt *ctxt)
5537{
5538 int ret;
5539 struct ocfs2_xa_loc loc;
5540
Tao Ma402b4182011-02-23 22:01:17 +08005541 trace_ocfs2_xattr_set_entry_bucket(xi->xi_name);
Joel Beckerc5d95df2009-08-18 21:03:24 -07005542
5543 ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket,
5544 xs->not_found ? NULL : xs->here);
5545 ret = ocfs2_xa_set(&loc, xi, ctxt);
5546 if (!ret) {
5547 xs->here = loc.xl_entry;
5548 goto out;
5549 }
5550 if (ret != -ENOSPC) {
5551 mlog_errno(ret);
5552 goto out;
5553 }
5554
5555 /* Ok, we need space. Let's try defragmenting the bucket. */
5556 ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle,
5557 xs->bucket);
5558 if (ret) {
5559 mlog_errno(ret);
5560 goto out;
5561 }
5562
5563 ret = ocfs2_xa_set(&loc, xi, ctxt);
5564 if (!ret) {
5565 xs->here = loc.xl_entry;
5566 goto out;
5567 }
5568 if (ret != -ENOSPC)
5569 mlog_errno(ret);
5570
5571
5572out:
Joel Beckerc5d95df2009-08-18 21:03:24 -07005573 return ret;
5574}
5575
Tao Ma01225592008-08-18 17:38:53 +08005576static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
5577 struct ocfs2_xattr_info *xi,
Tao Ma78f30c32008-11-12 08:27:00 +08005578 struct ocfs2_xattr_search *xs,
5579 struct ocfs2_xattr_set_ctxt *ctxt)
Tao Ma01225592008-08-18 17:38:53 +08005580{
Joel Beckerc5d95df2009-08-18 21:03:24 -07005581 int ret;
Tao Ma01225592008-08-18 17:38:53 +08005582
Tao Ma402b4182011-02-23 22:01:17 +08005583 trace_ocfs2_xattr_set_entry_index_block(xi->xi_name);
Tao Ma01225592008-08-18 17:38:53 +08005584
Joel Beckerc5d95df2009-08-18 21:03:24 -07005585 ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
5586 if (!ret)
5587 goto out;
5588 if (ret != -ENOSPC) {
5589 mlog_errno(ret);
5590 goto out;
Tao Ma01225592008-08-18 17:38:53 +08005591 }
5592
Joel Beckerc5d95df2009-08-18 21:03:24 -07005593 /* Ack, need more space. Let's try to get another bucket! */
5594
Tao Ma01225592008-08-18 17:38:53 +08005595 /*
Joel Beckerc5d95df2009-08-18 21:03:24 -07005596 * We do not allow for overlapping ranges between buckets. And
5597 * the maximum number of collisions we will allow is therefore
5598 * one bucket's worth, so check here whether we need to
5599 * add a new bucket for the insert.
Tao Ma01225592008-08-18 17:38:53 +08005600 */
Joel Beckerc5d95df2009-08-18 21:03:24 -07005601 ret = ocfs2_check_xattr_bucket_collision(inode,
Joel Becker91f20332008-11-26 15:25:41 -08005602 xs->bucket,
Joel Beckerc5d95df2009-08-18 21:03:24 -07005603 xi->xi_name);
5604 if (ret) {
5605 mlog_errno(ret);
5606 goto out;
Tao Ma01225592008-08-18 17:38:53 +08005607 }
5608
Joel Beckerc5d95df2009-08-18 21:03:24 -07005609 ret = ocfs2_add_new_xattr_bucket(inode,
5610 xs->xattr_bh,
5611 xs->bucket,
5612 ctxt);
5613 if (ret) {
5614 mlog_errno(ret);
5615 goto out;
5616 }
5617
5618 /*
5619 * ocfs2_add_new_xattr_bucket() will have updated
5620 * xs->bucket if it moved, but it will not have updated
5621 * any of the other search fields. Thus, we drop it and
5622 * re-search. Everything should be cached, so it'll be
5623 * quick.
5624 */
5625 ocfs2_xattr_bucket_relse(xs->bucket);
5626 ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh,
5627 xi->xi_name_index,
5628 xi->xi_name, xs);
5629 if (ret && ret != -ENODATA)
5630 goto out;
5631 xs->not_found = ret;
5632
5633 /* Ok, we have a new bucket, let's try again */
5634 ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
5635 if (ret && (ret != -ENOSPC))
5636 mlog_errno(ret);
5637
Tao Ma01225592008-08-18 17:38:53 +08005638out:
Tao Ma01225592008-08-18 17:38:53 +08005639 return ret;
5640}
Tao Maa3944252008-08-18 17:38:54 +08005641
5642static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
5643 struct ocfs2_xattr_bucket *bucket,
5644 void *para)
5645{
Tao Mace9c5a52009-08-18 11:43:59 +08005646 int ret = 0, ref_credits;
Joel Becker3e632942008-10-24 17:04:49 -07005647 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
Tao Maa3944252008-08-18 17:38:54 +08005648 u16 i;
5649 struct ocfs2_xattr_entry *xe;
Tao Ma78f30c32008-11-12 08:27:00 +08005650 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
5651 struct ocfs2_xattr_set_ctxt ctxt = {NULL, NULL,};
Joel Becker548b0f22008-11-24 19:32:13 -08005652 int credits = ocfs2_remove_extent_credits(osb->sb) +
5653 ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Tao Mace9c5a52009-08-18 11:43:59 +08005654 struct ocfs2_xattr_value_root *xv;
5655 struct ocfs2_rm_xattr_bucket_para *args =
5656 (struct ocfs2_rm_xattr_bucket_para *)para;
Tao Ma78f30c32008-11-12 08:27:00 +08005657
5658 ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
Tao Maa3944252008-08-18 17:38:54 +08005659
5660 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
5661 xe = &xh->xh_entries[i];
5662 if (ocfs2_xattr_is_local(xe))
5663 continue;
5664
Tao Mace9c5a52009-08-18 11:43:59 +08005665 ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket,
5666 i, &xv, NULL);
Joseph Qi023d4ea2015-04-14 15:43:33 -07005667 if (ret) {
5668 mlog_errno(ret);
5669 break;
5670 }
Tao Mace9c5a52009-08-18 11:43:59 +08005671
5672 ret = ocfs2_lock_xattr_remove_allocators(inode, xv,
5673 args->ref_ci,
5674 args->ref_root_bh,
5675 &ctxt.meta_ac,
5676 &ref_credits);
5677
5678 ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
Tao Ma88c3b062008-12-11 08:54:11 +08005679 if (IS_ERR(ctxt.handle)) {
5680 ret = PTR_ERR(ctxt.handle);
5681 mlog_errno(ret);
5682 break;
5683 }
5684
Joel Becker548b0f22008-11-24 19:32:13 -08005685 ret = ocfs2_xattr_bucket_value_truncate(inode, bucket,
Tao Ma78f30c32008-11-12 08:27:00 +08005686 i, 0, &ctxt);
Tao Ma88c3b062008-12-11 08:54:11 +08005687
5688 ocfs2_commit_trans(osb, ctxt.handle);
Tao Mace9c5a52009-08-18 11:43:59 +08005689 if (ctxt.meta_ac) {
5690 ocfs2_free_alloc_context(ctxt.meta_ac);
5691 ctxt.meta_ac = NULL;
5692 }
Tao Maa3944252008-08-18 17:38:54 +08005693 if (ret) {
5694 mlog_errno(ret);
5695 break;
5696 }
5697 }
5698
Tao Mace9c5a52009-08-18 11:43:59 +08005699 if (ctxt.meta_ac)
5700 ocfs2_free_alloc_context(ctxt.meta_ac);
Tao Ma78f30c32008-11-12 08:27:00 +08005701 ocfs2_schedule_truncate_log_flush(osb, 1);
5702 ocfs2_run_deallocs(osb, &ctxt.dealloc);
Tao Maa3944252008-08-18 17:38:54 +08005703 return ret;
5704}
5705
Mark Fasheh99219ae2008-10-07 14:52:59 -07005706/*
Tao Ma492a8a32009-08-18 11:43:17 +08005707 * Whenever we modify an xattr value root in the bucket (e.g. CoW
5708 * or changing an extent record flag), we need to recalculate
5709 * the metaecc for the whole bucket, so that is done here.
5710 *
5711 * Note:
5712 * The extra credits for this must be supplied by the caller.
5713 */
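/*
 * In this file the hook is wired up as a struct ocfs2_post_refcount
 * with .credits = bucket->bu_blocks, .para = the bucket and
 * .func = ocfs2_xattr_bucket_post_refcount; see
 * ocfs2_prepare_refcount_xattr() and
 * ocfs2_xattr_bucket_value_refcount() below for the two users.
 */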
5714static int ocfs2_xattr_bucket_post_refcount(struct inode *inode,
5715 handle_t *handle,
5716 void *para)
5717{
5718 int ret;
5719 struct ocfs2_xattr_bucket *bucket =
5720 (struct ocfs2_xattr_bucket *)para;
5721
5722 ret = ocfs2_xattr_bucket_journal_access(handle, bucket,
5723 OCFS2_JOURNAL_ACCESS_WRITE);
5724 if (ret) {
5725 mlog_errno(ret);
5726 return ret;
5727 }
5728
5729 ocfs2_xattr_bucket_journal_dirty(handle, bucket);
5730
5731 return 0;
5732}
5733
5734/*
5735 * Special action we need if the xattr value is refcounted.
5736 *
5737 * 1. If the xattr is refcounted, lock the tree.
5738 * 2. CoW the xattr if we are setting the new value and the value
5739 * will be stored outside.
5740 * 3. Otherwise, decrease_refcount will do the work for us, so it
5741 * is enough to lock the refcount tree and calculate the meta and credits.
5742 *
5743 * We have to do the CoW before ocfs2_init_xattr_set_ctxt, since
5744 * the CoW currently runs as its own complete transaction, while
5745 * that function also locks the allocators and could deadlock us.
5746 * So we CoW the whole xattr value here.
5747 */
5748static int ocfs2_prepare_refcount_xattr(struct inode *inode,
5749 struct ocfs2_dinode *di,
5750 struct ocfs2_xattr_info *xi,
5751 struct ocfs2_xattr_search *xis,
5752 struct ocfs2_xattr_search *xbs,
5753 struct ocfs2_refcount_tree **ref_tree,
5754 int *meta_add,
5755 int *credits)
5756{
5757 int ret = 0;
5758 struct ocfs2_xattr_block *xb;
5759 struct ocfs2_xattr_entry *xe;
5760 char *base;
5761 u32 p_cluster, num_clusters;
5762 unsigned int ext_flags;
5763 int name_offset, name_len;
5764 struct ocfs2_xattr_value_buf vb;
5765 struct ocfs2_xattr_bucket *bucket = NULL;
5766 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
5767 struct ocfs2_post_refcount refcount;
5768 struct ocfs2_post_refcount *p = NULL;
5769 struct buffer_head *ref_root_bh = NULL;
5770
5771 if (!xis->not_found) {
5772 xe = xis->here;
5773 name_offset = le16_to_cpu(xe->xe_name_offset);
5774 name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
5775 base = xis->base;
5776 vb.vb_bh = xis->inode_bh;
5777 vb.vb_access = ocfs2_journal_access_di;
5778 } else {
5779 int i, block_off = 0;
5780 xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
5781 xe = xbs->here;
5782 name_offset = le16_to_cpu(xe->xe_name_offset);
5783 name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
5784 i = xbs->here - xbs->header->xh_entries;
5785
5786 if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
Tao Mafd68a892009-08-18 11:43:21 +08005787 ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
Tao Ma492a8a32009-08-18 11:43:17 +08005788 bucket_xh(xbs->bucket),
5789 i, &block_off,
5790 &name_offset);
5791 if (ret) {
5792 mlog_errno(ret);
5793 goto out;
5794 }
5795 base = bucket_block(xbs->bucket, block_off);
5796 vb.vb_bh = xbs->bucket->bu_bhs[block_off];
5797 vb.vb_access = ocfs2_journal_access;
5798
5799 if (ocfs2_meta_ecc(osb)) {
5800 /* Create parameters for ocfs2_post_refcount. */
5801 bucket = xbs->bucket;
5802 refcount.credits = bucket->bu_blocks;
5803 refcount.para = bucket;
5804 refcount.func =
5805 ocfs2_xattr_bucket_post_refcount;
5806 p = &refcount;
5807 }
5808 } else {
5809 base = xbs->base;
5810 vb.vb_bh = xbs->xattr_bh;
5811 vb.vb_access = ocfs2_journal_access_xb;
5812 }
5813 }
5814
5815 if (ocfs2_xattr_is_local(xe))
5816 goto out;
5817
5818 vb.vb_xv = (struct ocfs2_xattr_value_root *)
5819 (base + name_offset + name_len);
5820
5821 ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster,
5822 &num_clusters, &vb.vb_xv->xr_list,
5823 &ext_flags);
5824 if (ret) {
5825 mlog_errno(ret);
5826 goto out;
5827 }
5828
5829 /*
5830 * We just need to check the 1st extent record, since we always
5831 * CoW the whole xattr, so there shouldn't be an xattr with
5832 * REFCOUNTED extent recs only after the 1st one.
5833 */
5834 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
5835 goto out;
5836
5837 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
5838 1, ref_tree, &ref_root_bh);
5839 if (ret) {
5840 mlog_errno(ret);
5841 goto out;
5842 }
5843
5844 /*
5845 * If we are deleting the xattr or the new value will be stored inline,
5846 * leave the clusters alone; the xattr truncate process will remove them
5847 * for us (it still needs the refcount tree lock and the meta/credits).
5848 * The worst case is that every cluster truncate splits the refcount
5849 * tree and turns the original extent record into 3, so we will need
5850 * at most 2 extra extent recs per cluster.
5851 */
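	/*
	 * A worked example with made-up numbers: for a refcounted value of
	 * 8 clusters, the worst case is that truncating each cluster splits
	 * the covering refcount record into three (the part before, the
	 * truncated part, the part after), i.e. 2 extra recs per cluster.
	 * ocfs2_refcounted_xattr_delete_need() below therefore has to budget
	 * for up to 8 * 2 = 16 additional extent recs in meta_add and credits.
	 */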
Joel Becker6b240ff2009-08-14 18:02:52 -07005852 if (!xi->xi_value || xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE) {
Tao Ma492a8a32009-08-18 11:43:17 +08005853
5854 ret = ocfs2_refcounted_xattr_delete_need(inode,
5855 &(*ref_tree)->rf_ci,
5856 ref_root_bh, vb.vb_xv,
5857 meta_add, credits);
5858 if (ret)
5859 mlog_errno(ret);
5860 goto out;
5861 }
5862
5863 ret = ocfs2_refcount_cow_xattr(inode, di, &vb,
5864 *ref_tree, ref_root_bh, 0,
5865 le32_to_cpu(vb.vb_xv->xr_clusters), p);
5866 if (ret)
5867 mlog_errno(ret);
5868
5869out:
5870 brelse(ref_root_bh);
5871 return ret;
5872}
5873
5874/*
Tao Ma01292412009-09-21 13:04:19 +08005875 * Add the REFCOUNTED flag to all the extent recs in an ocfs2_xattr_value_root.
5876 * The physical clusters will be added to the refcount tree.
5877 */
5878static int ocfs2_xattr_value_attach_refcount(struct inode *inode,
5879 struct ocfs2_xattr_value_root *xv,
5880 struct ocfs2_extent_tree *value_et,
5881 struct ocfs2_caching_info *ref_ci,
5882 struct buffer_head *ref_root_bh,
5883 struct ocfs2_cached_dealloc_ctxt *dealloc,
5884 struct ocfs2_post_refcount *refcount)
5885{
5886 int ret = 0;
5887 u32 clusters = le32_to_cpu(xv->xr_clusters);
5888 u32 cpos, p_cluster, num_clusters;
5889 struct ocfs2_extent_list *el = &xv->xr_list;
5890 unsigned int ext_flags;
5891
5892 cpos = 0;
5893 while (cpos < clusters) {
5894 ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
5895 &num_clusters, el, &ext_flags);
Joseph Qi17caf952013-09-11 14:19:55 -07005896 if (ret) {
5897 mlog_errno(ret);
5898 break;
5899 }
Tao Ma01292412009-09-21 13:04:19 +08005900
5901 cpos += num_clusters;
5902 if ((ext_flags & OCFS2_EXT_REFCOUNTED))
5903 continue;
5904
5905 BUG_ON(!p_cluster);
5906
5907 ret = ocfs2_add_refcount_flag(inode, value_et,
5908 ref_ci, ref_root_bh,
5909 cpos - num_clusters,
5910 p_cluster, num_clusters,
5911 dealloc, refcount);
5912 if (ret) {
5913 mlog_errno(ret);
5914 break;
5915 }
5916 }
5917
5918 return ret;
5919}
5920
5921/*
5922 * Given a normal ocfs2_xattr_header, refcount all the entries which
5923 * have their value stored outside.
5924 * Used for xattrs stored in inode and ocfs2_xattr_block.
5925 */
5926static int ocfs2_xattr_attach_refcount_normal(struct inode *inode,
5927 struct ocfs2_xattr_value_buf *vb,
5928 struct ocfs2_xattr_header *header,
5929 struct ocfs2_caching_info *ref_ci,
5930 struct buffer_head *ref_root_bh,
5931 struct ocfs2_cached_dealloc_ctxt *dealloc)
5932{
5933
5934 struct ocfs2_xattr_entry *xe;
5935 struct ocfs2_xattr_value_root *xv;
5936 struct ocfs2_extent_tree et;
5937 int i, ret = 0;
5938
5939 for (i = 0; i < le16_to_cpu(header->xh_count); i++) {
5940 xe = &header->xh_entries[i];
5941
5942 if (ocfs2_xattr_is_local(xe))
5943 continue;
5944
5945 xv = (struct ocfs2_xattr_value_root *)((void *)header +
5946 le16_to_cpu(xe->xe_name_offset) +
5947 OCFS2_XATTR_SIZE(xe->xe_name_len));
5948
5949 vb->vb_xv = xv;
5950 ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
5951
5952 ret = ocfs2_xattr_value_attach_refcount(inode, xv, &et,
5953 ref_ci, ref_root_bh,
5954 dealloc, NULL);
5955 if (ret) {
5956 mlog_errno(ret);
5957 break;
5958 }
5959 }
5960
5961 return ret;
5962}
5963
5964static int ocfs2_xattr_inline_attach_refcount(struct inode *inode,
5965 struct buffer_head *fe_bh,
5966 struct ocfs2_caching_info *ref_ci,
5967 struct buffer_head *ref_root_bh,
5968 struct ocfs2_cached_dealloc_ctxt *dealloc)
5969{
5970 struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
5971 struct ocfs2_xattr_header *header = (struct ocfs2_xattr_header *)
5972 (fe_bh->b_data + inode->i_sb->s_blocksize -
5973 le16_to_cpu(di->i_xattr_inline_size));
5974 struct ocfs2_xattr_value_buf vb = {
5975 .vb_bh = fe_bh,
5976 .vb_access = ocfs2_journal_access_di,
5977 };
5978
5979 return ocfs2_xattr_attach_refcount_normal(inode, &vb, header,
5980 ref_ci, ref_root_bh, dealloc);
5981}
5982
5983struct ocfs2_xattr_tree_value_refcount_para {
5984 struct ocfs2_caching_info *ref_ci;
5985 struct buffer_head *ref_root_bh;
5986 struct ocfs2_cached_dealloc_ctxt *dealloc;
5987};
5988
5989static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
5990 struct ocfs2_xattr_bucket *bucket,
5991 int offset,
5992 struct ocfs2_xattr_value_root **xv,
5993 struct buffer_head **bh)
5994{
5995 int ret, block_off, name_offset;
5996 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
5997 struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
5998 void *base;
5999
6000 ret = ocfs2_xattr_bucket_get_name_value(sb,
6001 bucket_xh(bucket),
6002 offset,
6003 &block_off,
6004 &name_offset);
6005 if (ret) {
6006 mlog_errno(ret);
6007 goto out;
6008 }
6009
6010 base = bucket_block(bucket, block_off);
6011
6012 *xv = (struct ocfs2_xattr_value_root *)(base + name_offset +
6013 OCFS2_XATTR_SIZE(xe->xe_name_len));
6014
6015 if (bh)
6016 *bh = bucket->bu_bhs[block_off];
6017out:
6018 return ret;
6019}
6020
6021/*
6022 * For a given xattr bucket, refcount all the entries which
6023 * have their value stored outside.
6024 */
6025static int ocfs2_xattr_bucket_value_refcount(struct inode *inode,
6026 struct ocfs2_xattr_bucket *bucket,
6027 void *para)
6028{
6029 int i, ret = 0;
6030 struct ocfs2_extent_tree et;
6031 struct ocfs2_xattr_tree_value_refcount_para *ref =
6032 (struct ocfs2_xattr_tree_value_refcount_para *)para;
6033 struct ocfs2_xattr_header *xh =
6034 (struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
6035 struct ocfs2_xattr_entry *xe;
6036 struct ocfs2_xattr_value_buf vb = {
6037 .vb_access = ocfs2_journal_access,
6038 };
6039 struct ocfs2_post_refcount refcount = {
6040 .credits = bucket->bu_blocks,
6041 .para = bucket,
6042 .func = ocfs2_xattr_bucket_post_refcount,
6043 };
6044 struct ocfs2_post_refcount *p = NULL;
6045
6046 /* We only need post_refcount if we support metaecc. */
6047 if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb)))
6048 p = &refcount;
6049
Tao Ma402b4182011-02-23 22:01:17 +08006050 trace_ocfs2_xattr_bucket_value_refcount(
6051 (unsigned long long)bucket_blkno(bucket),
6052 le16_to_cpu(xh->xh_count));
Tao Ma01292412009-09-21 13:04:19 +08006053 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
6054 xe = &xh->xh_entries[i];
6055
6056 if (ocfs2_xattr_is_local(xe))
6057 continue;
6058
6059 ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket, i,
6060 &vb.vb_xv, &vb.vb_bh);
6061 if (ret) {
6062 mlog_errno(ret);
6063 break;
6064 }
6065
6066 ocfs2_init_xattr_value_extent_tree(&et,
6067 INODE_CACHE(inode), &vb);
6068
6069 ret = ocfs2_xattr_value_attach_refcount(inode, vb.vb_xv,
6070 &et, ref->ref_ci,
6071 ref->ref_root_bh,
6072 ref->dealloc, p);
6073 if (ret) {
6074 mlog_errno(ret);
6075 break;
6076 }
6077 }
6078
6079 return ret;
6080
6081}
6082
6083static int ocfs2_refcount_xattr_tree_rec(struct inode *inode,
6084 struct buffer_head *root_bh,
6085 u64 blkno, u32 cpos, u32 len, void *para)
6086{
6087 return ocfs2_iterate_xattr_buckets(inode, blkno, len,
6088 ocfs2_xattr_bucket_value_refcount,
6089 para);
6090}
6091
6092static int ocfs2_xattr_block_attach_refcount(struct inode *inode,
6093 struct buffer_head *blk_bh,
6094 struct ocfs2_caching_info *ref_ci,
6095 struct buffer_head *ref_root_bh,
6096 struct ocfs2_cached_dealloc_ctxt *dealloc)
6097{
6098 int ret = 0;
6099 struct ocfs2_xattr_block *xb =
6100 (struct ocfs2_xattr_block *)blk_bh->b_data;
6101
6102 if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
6103 struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header;
6104 struct ocfs2_xattr_value_buf vb = {
6105 .vb_bh = blk_bh,
6106 .vb_access = ocfs2_journal_access_xb,
6107 };
6108
6109 ret = ocfs2_xattr_attach_refcount_normal(inode, &vb, header,
6110 ref_ci, ref_root_bh,
6111 dealloc);
6112 } else {
6113 struct ocfs2_xattr_tree_value_refcount_para para = {
6114 .ref_ci = ref_ci,
6115 .ref_root_bh = ref_root_bh,
6116 .dealloc = dealloc,
6117 };
6118
6119 ret = ocfs2_iterate_xattr_index_block(inode, blk_bh,
6120 ocfs2_refcount_xattr_tree_rec,
6121 &para);
6122 }
6123
6124 return ret;
6125}
6126
6127int ocfs2_xattr_attach_refcount_tree(struct inode *inode,
6128 struct buffer_head *fe_bh,
6129 struct ocfs2_caching_info *ref_ci,
6130 struct buffer_head *ref_root_bh,
6131 struct ocfs2_cached_dealloc_ctxt *dealloc)
6132{
6133 int ret = 0;
6134 struct ocfs2_inode_info *oi = OCFS2_I(inode);
6135 struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
6136 struct buffer_head *blk_bh = NULL;
6137
6138 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
6139 ret = ocfs2_xattr_inline_attach_refcount(inode, fe_bh,
6140 ref_ci, ref_root_bh,
6141 dealloc);
6142 if (ret) {
6143 mlog_errno(ret);
6144 goto out;
6145 }
6146 }
6147
6148 if (!di->i_xattr_loc)
6149 goto out;
6150
6151 ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
6152 &blk_bh);
6153 if (ret < 0) {
6154 mlog_errno(ret);
6155 goto out;
6156 }
6157
6158 ret = ocfs2_xattr_block_attach_refcount(inode, blk_bh, ref_ci,
6159 ref_root_bh, dealloc);
6160 if (ret)
6161 mlog_errno(ret);
6162
6163 brelse(blk_bh);
6164out:
6165
6166 return ret;
6167}
6168
Tao Ma0fe9b662009-08-18 11:47:56 +08006169typedef int (should_xattr_reflinked)(struct ocfs2_xattr_entry *xe);
Tao Ma01292412009-09-21 13:04:19 +08006170/*
Tao Ma2999d122009-08-18 11:43:55 +08006171 * Store the information we need during xattr reflink.
6172 * old_bh and new_bh are the inode bhs of the old and new inodes.
6173 */
6174struct ocfs2_xattr_reflink {
6175 struct inode *old_inode;
6176 struct inode *new_inode;
6177 struct buffer_head *old_bh;
6178 struct buffer_head *new_bh;
6179 struct ocfs2_caching_info *ref_ci;
6180 struct buffer_head *ref_root_bh;
6181 struct ocfs2_cached_dealloc_ctxt *dealloc;
Tao Ma0fe9b662009-08-18 11:47:56 +08006182 should_xattr_reflinked *xattr_reflinked;
Tao Ma2999d122009-08-18 11:43:55 +08006183};
6184
6185/*
6186 * Given an xattr header and xe offset,
6187 * return the proper xv and the corresponding bh.
6188 * Xattrs in an inode, an xattr block and an xattr tree have different implementations.
6189 */
6190typedef int (get_xattr_value_root)(struct super_block *sb,
6191 struct buffer_head *bh,
6192 struct ocfs2_xattr_header *xh,
6193 int offset,
6194 struct ocfs2_xattr_value_root **xv,
6195 struct buffer_head **ret_bh,
6196 void *para);
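/*
 * In this file the callback is implemented by
 * ocfs2_get_xattr_value_root() for xattrs kept in the inode or in an
 * xattr block, by ocfs2_value_tree_metas_in_bucket() when counting
 * value tree metadata per bucket, and by
 * ocfs2_get_reflink_xattr_value_root() for buckets being reflinked.
 */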
6197
6198/*
6199 * Calculate all the xattr value root metadata stored in this xattr header and
6200 * credits we need if we create them from scratch.
6201 * We use get_xattr_value_root so that all types of xattr container can use it.
6202 */
6203static int ocfs2_value_metas_in_xattr_header(struct super_block *sb,
6204 struct buffer_head *bh,
6205 struct ocfs2_xattr_header *xh,
6206 int *metas, int *credits,
6207 int *num_recs,
6208 get_xattr_value_root *func,
6209 void *para)
6210{
6211 int i, ret = 0;
6212 struct ocfs2_xattr_value_root *xv;
6213 struct ocfs2_xattr_entry *xe;
6214
6215 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
6216 xe = &xh->xh_entries[i];
6217 if (ocfs2_xattr_is_local(xe))
6218 continue;
6219
6220 ret = func(sb, bh, xh, i, &xv, NULL, para);
6221 if (ret) {
6222 mlog_errno(ret);
6223 break;
6224 }
6225
6226 *metas += le16_to_cpu(xv->xr_list.l_tree_depth) *
6227 le16_to_cpu(xv->xr_list.l_next_free_rec);
6228
6229 *credits += ocfs2_calc_extend_credits(sb,
Goldwyn Rodrigues06f9da62013-11-12 15:06:52 -08006230 &def_xv.xv.xr_list);
Tao Ma2999d122009-08-18 11:43:55 +08006231
6232 /*
6233 * If the value is a tree with depth > 0, we don't go deep
6234 * to the extent block, so just calculate a maximum record num.
6235 */
6236 if (!xv->xr_list.l_tree_depth)
Tao Ma8ff6af82009-12-23 14:31:15 +08006237 *num_recs += le16_to_cpu(xv->xr_list.l_next_free_rec);
Tao Ma2999d122009-08-18 11:43:55 +08006238 else
6239 *num_recs += ocfs2_clusters_for_bytes(sb,
6240 XATTR_SIZE_MAX);
6241 }
6242
6243 return ret;
6244}
6245
6246/* Used by xattr inode and block to return the right xv and buffer_head. */
6247static int ocfs2_get_xattr_value_root(struct super_block *sb,
6248 struct buffer_head *bh,
6249 struct ocfs2_xattr_header *xh,
6250 int offset,
6251 struct ocfs2_xattr_value_root **xv,
6252 struct buffer_head **ret_bh,
6253 void *para)
6254{
6255 struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
6256
6257 *xv = (struct ocfs2_xattr_value_root *)((void *)xh +
6258 le16_to_cpu(xe->xe_name_offset) +
6259 OCFS2_XATTR_SIZE(xe->xe_name_len));
6260
6261 if (ret_bh)
6262 *ret_bh = bh;
6263
6264 return 0;
6265}
6266
6267/*
6268 * Lock the meta_ac and calculate how many credits we need to reflink xattrs.
6269 * It is only used for inline xattr and xattr block.
6270 */
6271static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super *osb,
6272 struct ocfs2_xattr_header *xh,
6273 struct buffer_head *ref_root_bh,
6274 int *credits,
6275 struct ocfs2_alloc_context **meta_ac)
6276{
6277 int ret, meta_add = 0, num_recs = 0;
6278 struct ocfs2_refcount_block *rb =
6279 (struct ocfs2_refcount_block *)ref_root_bh->b_data;
6280
6281 *credits = 0;
6282
6283 ret = ocfs2_value_metas_in_xattr_header(osb->sb, NULL, xh,
6284 &meta_add, credits, &num_recs,
6285 ocfs2_get_xattr_value_root,
6286 NULL);
6287 if (ret) {
6288 mlog_errno(ret);
6289 goto out;
6290 }
6291
6292 /*
6293 * We need to add/modify num_recs records in the refcount tree, so just
6294 * calculate an approximate number of blocks we need for the change.
6295 * Sometimes we need to split the tree; after a split, half of the recs
6296 * are moved to the new block, so a new block can only hold half its
6297 * recs. That is why we multiply the number of new blocks by 2.
6298 */
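	/*
	 * A worked example with made-up numbers: if the values need
	 * num_recs = 100 refcount records and a refcount block holds
	 * ocfs2_refcount_recs_per_rb() = 50 of them, the lines below
	 * reserve 100 / 50 * 2 = 4 extra metadata blocks and add
	 * 4 + 4 * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS journal credits.
	 */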
6299 num_recs = num_recs / ocfs2_refcount_recs_per_rb(osb->sb) * 2;
6300 meta_add += num_recs;
6301 *credits += num_recs + num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
6302 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
6303 *credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
6304 le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
6305 else
6306 *credits += 1;
6307
6308 ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, meta_ac);
6309 if (ret)
6310 mlog_errno(ret);
6311
6312out:
6313 return ret;
6314}
6315
6316/*
6317 * Given an xattr header, reflink all the xattrs in this container.
6318 * It can be used for inode, block and bucket.
6319 *
6320 * NOTE:
6321 * Before we call this function, the caller has already memcpy'd the
6322 * xattrs from old_xh to new_xh.
Tao Ma0fe9b662009-08-18 11:47:56 +08006323 *
6324 * If args.xattr_reflinked is set, call it to decide whether the xe should
6325 * be reflinked or not. If not, remove it from the new xattr header.
Tao Ma2999d122009-08-18 11:43:55 +08006326 */
6327static int ocfs2_reflink_xattr_header(handle_t *handle,
6328 struct ocfs2_xattr_reflink *args,
6329 struct buffer_head *old_bh,
6330 struct ocfs2_xattr_header *xh,
6331 struct buffer_head *new_bh,
6332 struct ocfs2_xattr_header *new_xh,
6333 struct ocfs2_xattr_value_buf *vb,
6334 struct ocfs2_alloc_context *meta_ac,
6335 get_xattr_value_root *func,
6336 void *para)
6337{
Tao Ma0fe9b662009-08-18 11:47:56 +08006338 int ret = 0, i, j;
Tao Ma2999d122009-08-18 11:43:55 +08006339 struct super_block *sb = args->old_inode->i_sb;
6340 struct buffer_head *value_bh;
Tao Ma0fe9b662009-08-18 11:47:56 +08006341 struct ocfs2_xattr_entry *xe, *last;
Tao Ma2999d122009-08-18 11:43:55 +08006342 struct ocfs2_xattr_value_root *xv, *new_xv;
6343 struct ocfs2_extent_tree data_et;
6344 u32 clusters, cpos, p_cluster, num_clusters;
6345 unsigned int ext_flags = 0;
6346
Tao Ma402b4182011-02-23 22:01:17 +08006347 trace_ocfs2_reflink_xattr_header((unsigned long long)old_bh->b_blocknr,
6348 le16_to_cpu(xh->xh_count));
Tao Ma0fe9b662009-08-18 11:47:56 +08006349
6350 last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)];
6351 for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) {
Tao Ma2999d122009-08-18 11:43:55 +08006352 xe = &xh->xh_entries[i];
6353
Tao Ma0fe9b662009-08-18 11:47:56 +08006354 if (args->xattr_reflinked && !args->xattr_reflinked(xe)) {
6355 xe = &new_xh->xh_entries[j];
6356
6357 le16_add_cpu(&new_xh->xh_count, -1);
6358 if (new_xh->xh_count) {
6359 memmove(xe, xe + 1,
6360 (void *)last - (void *)xe);
6361 memset(last, 0,
6362 sizeof(struct ocfs2_xattr_entry));
6363 }
6364
6365 /*
6366 * We don't want j to increase in the next round, since the
6367 * remaining entries have already been moved up into this slot.
6368 */
6369 j--;
6370 continue;
6371 }
6372
Tao Ma2999d122009-08-18 11:43:55 +08006373 if (ocfs2_xattr_is_local(xe))
6374 continue;
6375
6376 ret = func(sb, old_bh, xh, i, &xv, NULL, para);
6377 if (ret) {
6378 mlog_errno(ret);
6379 break;
6380 }
6381
Tao Ma0fe9b662009-08-18 11:47:56 +08006382 ret = func(sb, new_bh, new_xh, j, &new_xv, &value_bh, para);
Tao Ma2999d122009-08-18 11:43:55 +08006383 if (ret) {
6384 mlog_errno(ret);
6385 break;
6386 }
6387
6388 /*
6389 * For an xattr which has l_tree_depth = 0, all the extent
6390 * recs have already been copied to the new xh with the
6391 * appropriate OCFS2_EXT_REFCOUNTED flag, so we just need to
6392 * increase the refcount in the refcount tree.
6393 *
6394 * For the xattr which has l_tree_depth > 0, we need
6395 * to initialize it to the empty default value root,
6396 * and then insert the extents one by one.
6397 */
6398 if (xv->xr_list.l_tree_depth) {
6399 memcpy(new_xv, &def_xv, sizeof(def_xv));
6400 vb->vb_xv = new_xv;
6401 vb->vb_bh = value_bh;
6402 ocfs2_init_xattr_value_extent_tree(&data_et,
6403 INODE_CACHE(args->new_inode), vb);
6404 }
6405
6406 clusters = le32_to_cpu(xv->xr_clusters);
6407 cpos = 0;
6408 while (cpos < clusters) {
6409 ret = ocfs2_xattr_get_clusters(args->old_inode,
6410 cpos,
6411 &p_cluster,
6412 &num_clusters,
6413 &xv->xr_list,
6414 &ext_flags);
6415 if (ret) {
6416 mlog_errno(ret);
6417 goto out;
6418 }
6419
6420 BUG_ON(!p_cluster);
6421
6422 if (xv->xr_list.l_tree_depth) {
6423 ret = ocfs2_insert_extent(handle,
6424 &data_et, cpos,
6425 ocfs2_clusters_to_blocks(
6426 args->old_inode->i_sb,
6427 p_cluster),
6428 num_clusters, ext_flags,
6429 meta_ac);
6430 if (ret) {
6431 mlog_errno(ret);
6432 goto out;
6433 }
6434 }
6435
6436 ret = ocfs2_increase_refcount(handle, args->ref_ci,
6437 args->ref_root_bh,
6438 p_cluster, num_clusters,
6439 meta_ac, args->dealloc);
6440 if (ret) {
6441 mlog_errno(ret);
6442 goto out;
6443 }
6444
6445 cpos += num_clusters;
6446 }
6447 }
6448
6449out:
6450 return ret;
6451}
6452
6453static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args)
6454{
6455 int ret = 0, credits = 0;
6456 handle_t *handle;
6457 struct ocfs2_super *osb = OCFS2_SB(args->old_inode->i_sb);
6458 struct ocfs2_dinode *di = (struct ocfs2_dinode *)args->old_bh->b_data;
6459 int inline_size = le16_to_cpu(di->i_xattr_inline_size);
6460 int header_off = osb->sb->s_blocksize - inline_size;
6461 struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)
6462 (args->old_bh->b_data + header_off);
6463 struct ocfs2_xattr_header *new_xh = (struct ocfs2_xattr_header *)
6464 (args->new_bh->b_data + header_off);
6465 struct ocfs2_alloc_context *meta_ac = NULL;
6466 struct ocfs2_inode_info *new_oi;
6467 struct ocfs2_dinode *new_di;
6468 struct ocfs2_xattr_value_buf vb = {
6469 .vb_bh = args->new_bh,
6470 .vb_access = ocfs2_journal_access_di,
6471 };
6472
6473 ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
6474 &credits, &meta_ac);
6475 if (ret) {
6476 mlog_errno(ret);
6477 goto out;
6478 }
6479
6480 handle = ocfs2_start_trans(osb, credits);
6481 if (IS_ERR(handle)) {
6482 ret = PTR_ERR(handle);
6483 mlog_errno(ret);
6484 goto out;
6485 }
6486
6487 ret = ocfs2_journal_access_di(handle, INODE_CACHE(args->new_inode),
6488 args->new_bh, OCFS2_JOURNAL_ACCESS_WRITE);
6489 if (ret) {
6490 mlog_errno(ret);
6491 goto out_commit;
6492 }
6493
6494 memcpy(args->new_bh->b_data + header_off,
6495 args->old_bh->b_data + header_off, inline_size);
6496
6497 new_di = (struct ocfs2_dinode *)args->new_bh->b_data;
6498 new_di->i_xattr_inline_size = cpu_to_le16(inline_size);
6499
6500 ret = ocfs2_reflink_xattr_header(handle, args, args->old_bh, xh,
6501 args->new_bh, new_xh, &vb, meta_ac,
6502 ocfs2_get_xattr_value_root, NULL);
6503 if (ret) {
6504 mlog_errno(ret);
6505 goto out_commit;
6506 }
6507
6508 new_oi = OCFS2_I(args->new_inode);
Junxiao Bief962df2013-07-03 15:01:03 -07006509 /*
6510 * Adjust the extent record count to reserve space for extended attributes.
6511 * The inline data count was already adjusted in ocfs2_duplicate_inline_data().
6512 */
6513 if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
6514 !(ocfs2_inode_is_fast_symlink(args->new_inode))) {
6515 struct ocfs2_extent_list *el = &new_di->id2.i_list;
6516 le16_add_cpu(&el->l_count, -(inline_size /
6517 sizeof(struct ocfs2_extent_rec)));
6518 }
Tao Ma2999d122009-08-18 11:43:55 +08006519 spin_lock(&new_oi->ip_lock);
6520 new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
6521 new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
6522 spin_unlock(&new_oi->ip_lock);
6523
6524 ocfs2_journal_dirty(handle, args->new_bh);
6525
6526out_commit:
6527 ocfs2_commit_trans(osb, handle);
6528
6529out:
6530 if (meta_ac)
6531 ocfs2_free_alloc_context(meta_ac);
6532 return ret;
6533}
6534
6535static int ocfs2_create_empty_xattr_block(struct inode *inode,
6536 struct buffer_head *fe_bh,
6537 struct buffer_head **ret_bh,
6538 int indexed)
6539{
6540 int ret;
Tao Ma2999d122009-08-18 11:43:55 +08006541 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
Tao Mab2317962010-03-19 15:04:24 +08006542 struct ocfs2_xattr_set_ctxt ctxt;
Tao Ma2999d122009-08-18 11:43:55 +08006543
Tao Mab2317962010-03-19 15:04:24 +08006544 memset(&ctxt, 0, sizeof(ctxt));
6545 ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &ctxt.meta_ac);
Tao Ma2999d122009-08-18 11:43:55 +08006546 if (ret < 0) {
6547 mlog_errno(ret);
6548 return ret;
6549 }
6550
Joel Beckerd3981542009-08-19 02:13:50 -07006551 ctxt.handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
6552 if (IS_ERR(ctxt.handle)) {
6553 ret = PTR_ERR(ctxt.handle);
Tao Ma2999d122009-08-18 11:43:55 +08006554 mlog_errno(ret);
6555 goto out;
6556 }
6557
Tao Ma402b4182011-02-23 22:01:17 +08006558 trace_ocfs2_create_empty_xattr_block(
6559 (unsigned long long)fe_bh->b_blocknr, indexed);
Joel Beckerd3981542009-08-19 02:13:50 -07006560 ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed,
6561 ret_bh);
Tao Ma2999d122009-08-18 11:43:55 +08006562 if (ret)
6563 mlog_errno(ret);
6564
Joel Beckerd3981542009-08-19 02:13:50 -07006565 ocfs2_commit_trans(osb, ctxt.handle);
Tao Ma2999d122009-08-18 11:43:55 +08006566out:
Tao Mab2317962010-03-19 15:04:24 +08006567 ocfs2_free_alloc_context(ctxt.meta_ac);
Tao Ma2999d122009-08-18 11:43:55 +08006568 return ret;
6569}
6570
6571static int ocfs2_reflink_xattr_block(struct ocfs2_xattr_reflink *args,
6572 struct buffer_head *blk_bh,
6573 struct buffer_head *new_blk_bh)
6574{
6575 int ret = 0, credits = 0;
6576 handle_t *handle;
6577 struct ocfs2_inode_info *new_oi = OCFS2_I(args->new_inode);
6578 struct ocfs2_dinode *new_di;
6579 struct ocfs2_super *osb = OCFS2_SB(args->new_inode->i_sb);
6580 int header_off = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
6581 struct ocfs2_xattr_block *xb =
6582 (struct ocfs2_xattr_block *)blk_bh->b_data;
6583 struct ocfs2_xattr_header *xh = &xb->xb_attrs.xb_header;
6584 struct ocfs2_xattr_block *new_xb =
6585 (struct ocfs2_xattr_block *)new_blk_bh->b_data;
6586 struct ocfs2_xattr_header *new_xh = &new_xb->xb_attrs.xb_header;
6587 struct ocfs2_alloc_context *meta_ac;
6588 struct ocfs2_xattr_value_buf vb = {
6589 .vb_bh = new_blk_bh,
6590 .vb_access = ocfs2_journal_access_xb,
6591 };
6592
6593 ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
6594 &credits, &meta_ac);
6595 if (ret) {
6596 mlog_errno(ret);
6597 return ret;
6598 }
6599
6600 /* One more credit in case we need to add xattr flags in the new inode. */
6601 handle = ocfs2_start_trans(osb, credits + 1);
6602 if (IS_ERR(handle)) {
6603 ret = PTR_ERR(handle);
6604 mlog_errno(ret);
6605 goto out;
6606 }
6607
6608 if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
6609 ret = ocfs2_journal_access_di(handle,
6610 INODE_CACHE(args->new_inode),
6611 args->new_bh,
6612 OCFS2_JOURNAL_ACCESS_WRITE);
6613 if (ret) {
6614 mlog_errno(ret);
6615 goto out_commit;
6616 }
6617 }
6618
6619 ret = ocfs2_journal_access_xb(handle, INODE_CACHE(args->new_inode),
6620 new_blk_bh, OCFS2_JOURNAL_ACCESS_WRITE);
6621 if (ret) {
6622 mlog_errno(ret);
6623 goto out_commit;
6624 }
6625
6626 memcpy(new_blk_bh->b_data + header_off, blk_bh->b_data + header_off,
6627 osb->sb->s_blocksize - header_off);
6628
6629 ret = ocfs2_reflink_xattr_header(handle, args, blk_bh, xh,
6630 new_blk_bh, new_xh, &vb, meta_ac,
6631 ocfs2_get_xattr_value_root, NULL);
6632 if (ret) {
6633 mlog_errno(ret);
6634 goto out_commit;
6635 }
6636
6637 ocfs2_journal_dirty(handle, new_blk_bh);
6638
6639 if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
6640 new_di = (struct ocfs2_dinode *)args->new_bh->b_data;
6641 spin_lock(&new_oi->ip_lock);
6642 new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
6643 new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
6644 spin_unlock(&new_oi->ip_lock);
6645
6646 ocfs2_journal_dirty(handle, args->new_bh);
6647 }
6648
6649out_commit:
6650 ocfs2_commit_trans(osb, handle);
6651
6652out:
6653 ocfs2_free_alloc_context(meta_ac);
6654 return ret;
6655}
6656
6657struct ocfs2_reflink_xattr_tree_args {
6658 struct ocfs2_xattr_reflink *reflink;
6659 struct buffer_head *old_blk_bh;
6660 struct buffer_head *new_blk_bh;
6661 struct ocfs2_xattr_bucket *old_bucket;
6662 struct ocfs2_xattr_bucket *new_bucket;
6663};
6664
6665/*
6666 * NOTE:
6667 * We have to handle the case that both the old bucket and the new
6668 * bucket call this function to get the right ret_bh,
6669 * so the caller must give us the right bh.
6670 */
6671static int ocfs2_get_reflink_xattr_value_root(struct super_block *sb,
6672 struct buffer_head *bh,
6673 struct ocfs2_xattr_header *xh,
6674 int offset,
6675 struct ocfs2_xattr_value_root **xv,
6676 struct buffer_head **ret_bh,
6677 void *para)
6678{
6679 struct ocfs2_reflink_xattr_tree_args *args =
6680 (struct ocfs2_reflink_xattr_tree_args *)para;
6681 struct ocfs2_xattr_bucket *bucket;
6682
6683 if (bh == args->old_bucket->bu_bhs[0])
6684 bucket = args->old_bucket;
6685 else
6686 bucket = args->new_bucket;
6687
6688 return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
6689 xv, ret_bh);
6690}
6691
6692struct ocfs2_value_tree_metas {
6693 int num_metas;
6694 int credits;
6695 int num_recs;
6696};
6697
6698static int ocfs2_value_tree_metas_in_bucket(struct super_block *sb,
6699 struct buffer_head *bh,
6700 struct ocfs2_xattr_header *xh,
6701 int offset,
6702 struct ocfs2_xattr_value_root **xv,
6703 struct buffer_head **ret_bh,
6704 void *para)
6705{
6706 struct ocfs2_xattr_bucket *bucket =
6707 (struct ocfs2_xattr_bucket *)para;
6708
6709 return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
6710 xv, ret_bh);
6711}
6712
6713static int ocfs2_calc_value_tree_metas(struct inode *inode,
6714 struct ocfs2_xattr_bucket *bucket,
6715 void *para)
6716{
6717 struct ocfs2_value_tree_metas *metas =
6718 (struct ocfs2_value_tree_metas *)para;
6719 struct ocfs2_xattr_header *xh =
6720 (struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
6721
6722 /* Add the credits for this bucket first. */
6723 metas->credits += bucket->bu_blocks;
6724 return ocfs2_value_metas_in_xattr_header(inode->i_sb, bucket->bu_bhs[0],
6725 xh, &metas->num_metas,
6726 &metas->credits, &metas->num_recs,
6727 ocfs2_value_tree_metas_in_bucket,
6728 bucket);
6729}
6730
6731/*
6732 * Given an xattr extent rec starting at blkno and covering len clusters,
6733 * iterate over all the buckets, calculate how much metadata we need to reflink
6734 * all the ocfs2_xattr_value_roots, and lock the allocators accordingly.
6735 */
6736static int ocfs2_lock_reflink_xattr_rec_allocators(
6737 struct ocfs2_reflink_xattr_tree_args *args,
6738 struct ocfs2_extent_tree *xt_et,
6739 u64 blkno, u32 len, int *credits,
6740 struct ocfs2_alloc_context **meta_ac,
6741 struct ocfs2_alloc_context **data_ac)
6742{
6743 int ret, num_free_extents;
6744 struct ocfs2_value_tree_metas metas;
6745 struct ocfs2_super *osb = OCFS2_SB(args->reflink->old_inode->i_sb);
6746 struct ocfs2_refcount_block *rb;
6747
6748 memset(&metas, 0, sizeof(metas));
6749
6750 ret = ocfs2_iterate_xattr_buckets(args->reflink->old_inode, blkno, len,
6751 ocfs2_calc_value_tree_metas, &metas);
6752 if (ret) {
6753 mlog_errno(ret);
6754 goto out;
6755 }
6756
6757 *credits = metas.credits;
6758
6759 /*
6760 * Calculate what we need for the refcount tree change.
6761 *
6762 * We need to add/modify num_recs records in the refcount tree, so just
6763 * calculate an approximate number of blocks we need for the change.
6764 * Sometimes we need to split the tree; after a split, half of the recs
6765 * are moved to the new block, so a new block can only hold half its
6766 * recs. That is why we multiply the number of new blocks by 2.
6767 * In the end, we also have to add credits for modifying the already
6768 * existing refcount block.
6769 */
6770 rb = (struct ocfs2_refcount_block *)args->reflink->ref_root_bh->b_data;
6771 metas.num_recs =
6772 (metas.num_recs + ocfs2_refcount_recs_per_rb(osb->sb) - 1) /
6773 ocfs2_refcount_recs_per_rb(osb->sb) * 2;
6774 metas.num_metas += metas.num_recs;
6775 *credits += metas.num_recs +
6776 metas.num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
6777 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
6778 *credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
6779 le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
6780 else
6781 *credits += 1;
6782
6783 /* count in the xattr tree change. */
6784 num_free_extents = ocfs2_num_free_extents(osb, xt_et);
6785 if (num_free_extents < 0) {
6786 ret = num_free_extents;
6787 mlog_errno(ret);
6788 goto out;
6789 }
6790
6791 if (num_free_extents < len)
6792 metas.num_metas += ocfs2_extend_meta_needed(xt_et->et_root_el);
6793
6794 *credits += ocfs2_calc_extend_credits(osb->sb,
Goldwyn Rodrigues06f9da62013-11-12 15:06:52 -08006795 xt_et->et_root_el);
Tao Ma2999d122009-08-18 11:43:55 +08006796
6797 if (metas.num_metas) {
6798 ret = ocfs2_reserve_new_metadata_blocks(osb, metas.num_metas,
6799 meta_ac);
6800 if (ret) {
6801 mlog_errno(ret);
6802 goto out;
6803 }
6804 }
6805
6806 if (len) {
6807 ret = ocfs2_reserve_clusters(osb, len, data_ac);
6808 if (ret)
6809 mlog_errno(ret);
6810 }
6811out:
6812 if (ret) {
6813 if (*meta_ac) {
6814 ocfs2_free_alloc_context(*meta_ac);
Joseph Qi6cae6d32013-09-11 14:19:58 -07006815 *meta_ac = NULL;
Tao Ma2999d122009-08-18 11:43:55 +08006816 }
6817 }
6818
6819 return ret;
6820}
6821
Tao Ma121a39b2010-07-09 14:53:12 +08006822static int ocfs2_reflink_xattr_bucket(handle_t *handle,
Tao Ma2999d122009-08-18 11:43:55 +08006823 u64 blkno, u64 new_blkno, u32 clusters,
Tao Ma121a39b2010-07-09 14:53:12 +08006824 u32 *cpos, int num_buckets,
Tao Ma2999d122009-08-18 11:43:55 +08006825 struct ocfs2_alloc_context *meta_ac,
6826 struct ocfs2_alloc_context *data_ac,
6827 struct ocfs2_reflink_xattr_tree_args *args)
6828{
6829 int i, j, ret = 0;
6830 struct super_block *sb = args->reflink->old_inode->i_sb;
Tao Ma2999d122009-08-18 11:43:55 +08006831 int bpb = args->old_bucket->bu_blocks;
6832 struct ocfs2_xattr_value_buf vb = {
6833 .vb_access = ocfs2_journal_access,
6834 };
6835
6836 for (i = 0; i < num_buckets; i++, blkno += bpb, new_blkno += bpb) {
6837 ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
6838 if (ret) {
6839 mlog_errno(ret);
6840 break;
6841 }
6842
Wengang Wang9c339252014-04-03 14:47:15 -07006843 ret = ocfs2_init_xattr_bucket(args->new_bucket, new_blkno, 1);
Tao Ma2999d122009-08-18 11:43:55 +08006844 if (ret) {
6845 mlog_errno(ret);
6846 break;
6847 }
6848
Tao Ma2999d122009-08-18 11:43:55 +08006849 ret = ocfs2_xattr_bucket_journal_access(handle,
6850 args->new_bucket,
6851 OCFS2_JOURNAL_ACCESS_CREATE);
6852 if (ret) {
6853 mlog_errno(ret);
6854 break;
6855 }
6856
6857 for (j = 0; j < bpb; j++)
6858 memcpy(bucket_block(args->new_bucket, j),
6859 bucket_block(args->old_bucket, j),
6860 sb->s_blocksize);
6861
Tao Ma121a39b2010-07-09 14:53:12 +08006862 /*
6863 * Record the start cpos so that we can use it to initialize
6864 * our xattr tree; we also set xh_num_buckets for the new
6865 * bucket.
6866 */
6867 if (i == 0) {
6868 *cpos = le32_to_cpu(bucket_xh(args->new_bucket)->
6869 xh_entries[0].xe_name_hash);
6870 bucket_xh(args->new_bucket)->xh_num_buckets =
6871 cpu_to_le16(num_buckets);
6872 }
6873
Tao Ma2999d122009-08-18 11:43:55 +08006874 ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
6875
6876 ret = ocfs2_reflink_xattr_header(handle, args->reflink,
6877 args->old_bucket->bu_bhs[0],
6878 bucket_xh(args->old_bucket),
6879 args->new_bucket->bu_bhs[0],
6880 bucket_xh(args->new_bucket),
6881 &vb, meta_ac,
6882 ocfs2_get_reflink_xattr_value_root,
6883 args);
6884 if (ret) {
6885 mlog_errno(ret);
6886 break;
6887 }
6888
6889 /*
6890 * Re-access and dirty the bucket so the metaecc is recalculated,
6891 * because reflink_xattr_header may have extended the transaction,
6892 * which drops the journal access we took on these blocks earlier.
6893 */
6894 ret = ocfs2_xattr_bucket_journal_access(handle,
6895 args->new_bucket,
6896 OCFS2_JOURNAL_ACCESS_WRITE);
6897 if (ret) {
6898 mlog_errno(ret);
6899 break;
6900 }
6901
6902 ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
Tao Ma121a39b2010-07-09 14:53:12 +08006903
Tao Ma2999d122009-08-18 11:43:55 +08006904 ocfs2_xattr_bucket_relse(args->old_bucket);
6905 ocfs2_xattr_bucket_relse(args->new_bucket);
6906 }
6907
6908 ocfs2_xattr_bucket_relse(args->old_bucket);
6909 ocfs2_xattr_bucket_relse(args->new_bucket);
6910 return ret;
6911}
Tao Ma121a39b2010-07-09 14:53:12 +08006912
6913static int ocfs2_reflink_xattr_buckets(handle_t *handle,
6914 struct inode *inode,
6915 struct ocfs2_reflink_xattr_tree_args *args,
6916 struct ocfs2_extent_tree *et,
6917 struct ocfs2_alloc_context *meta_ac,
6918 struct ocfs2_alloc_context *data_ac,
6919 u64 blkno, u32 cpos, u32 len)
6920{
6921 int ret, first_inserted = 0;
6922 u32 p_cluster, num_clusters, reflink_cpos = 0;
6923 u64 new_blkno;
6924 unsigned int num_buckets, reflink_buckets;
6925 unsigned int bpc =
6926 ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb));
6927
6928 ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
6929 if (ret) {
6930 mlog_errno(ret);
6931 goto out;
6932 }
6933 num_buckets = le16_to_cpu(bucket_xh(args->old_bucket)->xh_num_buckets);
6934 ocfs2_xattr_bucket_relse(args->old_bucket);
6935
6936 while (len && num_buckets) {
6937 ret = ocfs2_claim_clusters(handle, data_ac,
6938 1, &p_cluster, &num_clusters);
6939 if (ret) {
6940 mlog_errno(ret);
6941 goto out;
6942 }
6943
6944 new_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
6945 reflink_buckets = min(num_buckets, bpc * num_clusters);
6946
6947 ret = ocfs2_reflink_xattr_bucket(handle, blkno,
6948 new_blkno, num_clusters,
6949 &reflink_cpos, reflink_buckets,
6950 meta_ac, data_ac, args);
6951 if (ret) {
6952 mlog_errno(ret);
6953 goto out;
6954 }
6955
6956 /*
6957 * For the 1st allocated cluster, we make it use the same cpos
6958 * as the original, so that the xattr tree looks the same as the
6959 * old one in most cases.
6960 */
6961 if (!first_inserted) {
6962 reflink_cpos = cpos;
6963 first_inserted = 1;
6964 }
6965 ret = ocfs2_insert_extent(handle, et, reflink_cpos, new_blkno,
6966 num_clusters, 0, meta_ac);
6967 if (ret)
6968 mlog_errno(ret);
6969
Tao Ma402b4182011-02-23 22:01:17 +08006970 trace_ocfs2_reflink_xattr_buckets((unsigned long long)new_blkno,
6971 num_clusters, reflink_cpos);
Tao Ma121a39b2010-07-09 14:53:12 +08006972
6973 len -= num_clusters;
6974 blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
6975 num_buckets -= reflink_buckets;
6976 }
6977out:
6978 return ret;
6979}
6980
Tao Ma2999d122009-08-18 11:43:55 +08006981/*
6982 * Create the same xattr extent record in the new inode's xattr tree.
6983 */
6984static int ocfs2_reflink_xattr_rec(struct inode *inode,
6985 struct buffer_head *root_bh,
6986 u64 blkno,
6987 u32 cpos,
6988 u32 len,
6989 void *para)
6990{
6991 int ret, credits = 0;
Tao Ma2999d122009-08-18 11:43:55 +08006992 handle_t *handle;
6993 struct ocfs2_reflink_xattr_tree_args *args =
6994 (struct ocfs2_reflink_xattr_tree_args *)para;
6995 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
6996 struct ocfs2_alloc_context *meta_ac = NULL;
6997 struct ocfs2_alloc_context *data_ac = NULL;
6998 struct ocfs2_extent_tree et;
6999
Tao Ma402b4182011-02-23 22:01:17 +08007000 trace_ocfs2_reflink_xattr_rec((unsigned long long)blkno, len);
Tao Ma121a39b2010-07-09 14:53:12 +08007001
Tao Ma2999d122009-08-18 11:43:55 +08007002 ocfs2_init_xattr_tree_extent_tree(&et,
7003 INODE_CACHE(args->reflink->new_inode),
7004 args->new_blk_bh);
7005
7006 ret = ocfs2_lock_reflink_xattr_rec_allocators(args, &et, blkno,
7007 len, &credits,
7008 &meta_ac, &data_ac);
7009 if (ret) {
7010 mlog_errno(ret);
7011 goto out;
7012 }
7013
7014 handle = ocfs2_start_trans(osb, credits);
7015 if (IS_ERR(handle)) {
7016 ret = PTR_ERR(handle);
7017 mlog_errno(ret);
7018 goto out;
7019 }
7020
Tao Ma121a39b2010-07-09 14:53:12 +08007021 ret = ocfs2_reflink_xattr_buckets(handle, inode, args, &et,
7022 meta_ac, data_ac,
7023 blkno, cpos, len);
Tao Ma2999d122009-08-18 11:43:55 +08007024 if (ret)
7025 mlog_errno(ret);
7026
Tao Ma2999d122009-08-18 11:43:55 +08007027 ocfs2_commit_trans(osb, handle);
7028
7029out:
7030 if (meta_ac)
7031 ocfs2_free_alloc_context(meta_ac);
7032 if (data_ac)
7033 ocfs2_free_alloc_context(data_ac);
7034 return ret;
7035}
7036
7037/*
7038 * Create reflinked xattr buckets.
7039 * We will add buckets one by one, and refcount all the xattrs in each bucket
7040 * whose values are stored outside.
7041 */
7042static int ocfs2_reflink_xattr_tree(struct ocfs2_xattr_reflink *args,
7043 struct buffer_head *blk_bh,
7044 struct buffer_head *new_blk_bh)
7045{
7046 int ret;
7047 struct ocfs2_reflink_xattr_tree_args para;
7048
7049 memset(&para, 0, sizeof(para));
7050 para.reflink = args;
7051 para.old_blk_bh = blk_bh;
7052 para.new_blk_bh = new_blk_bh;
7053
7054 para.old_bucket = ocfs2_xattr_bucket_new(args->old_inode);
7055 if (!para.old_bucket) {
7056 mlog_errno(-ENOMEM);
7057 return -ENOMEM;
7058 }
7059
7060 para.new_bucket = ocfs2_xattr_bucket_new(args->new_inode);
7061 if (!para.new_bucket) {
7062 ret = -ENOMEM;
7063 mlog_errno(ret);
7064 goto out;
7065 }
7066
7067 ret = ocfs2_iterate_xattr_index_block(args->old_inode, blk_bh,
7068 ocfs2_reflink_xattr_rec,
7069 &para);
7070 if (ret)
7071 mlog_errno(ret);
7072
7073out:
7074 ocfs2_xattr_bucket_free(para.old_bucket);
7075 ocfs2_xattr_bucket_free(para.new_bucket);
7076 return ret;
7077}
7078
7079static int ocfs2_reflink_xattr_in_block(struct ocfs2_xattr_reflink *args,
7080 struct buffer_head *blk_bh)
7081{
7082 int ret, indexed = 0;
7083 struct buffer_head *new_blk_bh = NULL;
7084 struct ocfs2_xattr_block *xb =
7085 (struct ocfs2_xattr_block *)blk_bh->b_data;
7086
7087
7088 if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)
7089 indexed = 1;
7090
7091 ret = ocfs2_create_empty_xattr_block(args->new_inode, args->new_bh,
7092 &new_blk_bh, indexed);
7093 if (ret) {
7094 mlog_errno(ret);
7095 goto out;
7096 }
7097
Jeff Liu2decd652010-10-12 11:18:18 +08007098 if (!indexed)
Tao Ma2999d122009-08-18 11:43:55 +08007099 ret = ocfs2_reflink_xattr_block(args, blk_bh, new_blk_bh);
7100 else
7101 ret = ocfs2_reflink_xattr_tree(args, blk_bh, new_blk_bh);
7102 if (ret)
7103 mlog_errno(ret);
7104
7105out:
7106 brelse(new_blk_bh);
7107 return ret;
7108}
7109
Tao Ma0fe9b662009-08-18 11:47:56 +08007110static int ocfs2_reflink_xattr_no_security(struct ocfs2_xattr_entry *xe)
7111{
7112 int type = ocfs2_xattr_get_type(xe);
7113
7114 return type != OCFS2_XATTR_INDEX_SECURITY &&
7115 type != OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS &&
7116 type != OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
7117}
7118
Tao Ma2999d122009-08-18 11:43:55 +08007119int ocfs2_reflink_xattrs(struct inode *old_inode,
7120 struct buffer_head *old_bh,
7121 struct inode *new_inode,
Tao Ma0fe9b662009-08-18 11:47:56 +08007122 struct buffer_head *new_bh,
7123 bool preserve_security)
Tao Ma2999d122009-08-18 11:43:55 +08007124{
7125 int ret;
7126 struct ocfs2_xattr_reflink args;
7127 struct ocfs2_inode_info *oi = OCFS2_I(old_inode);
7128 struct ocfs2_dinode *di = (struct ocfs2_dinode *)old_bh->b_data;
7129 struct buffer_head *blk_bh = NULL;
7130 struct ocfs2_cached_dealloc_ctxt dealloc;
7131 struct ocfs2_refcount_tree *ref_tree;
7132 struct buffer_head *ref_root_bh = NULL;
7133
7134 ret = ocfs2_lock_refcount_tree(OCFS2_SB(old_inode->i_sb),
7135 le64_to_cpu(di->i_refcount_loc),
7136 1, &ref_tree, &ref_root_bh);
7137 if (ret) {
7138 mlog_errno(ret);
7139 goto out;
7140 }
7141
7142 ocfs2_init_dealloc_ctxt(&dealloc);
7143
7144 args.old_inode = old_inode;
7145 args.new_inode = new_inode;
7146 args.old_bh = old_bh;
7147 args.new_bh = new_bh;
7148 args.ref_ci = &ref_tree->rf_ci;
7149 args.ref_root_bh = ref_root_bh;
7150 args.dealloc = &dealloc;
Tao Ma0fe9b662009-08-18 11:47:56 +08007151 if (preserve_security)
7152 args.xattr_reflinked = NULL;
7153 else
7154 args.xattr_reflinked = ocfs2_reflink_xattr_no_security;
Tao Ma2999d122009-08-18 11:43:55 +08007155
7156 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
7157 ret = ocfs2_reflink_xattr_inline(&args);
7158 if (ret) {
7159 mlog_errno(ret);
7160 goto out_unlock;
7161 }
7162 }
7163
7164 if (!di->i_xattr_loc)
7165 goto out_unlock;
7166
7167 ret = ocfs2_read_xattr_block(old_inode, le64_to_cpu(di->i_xattr_loc),
7168 &blk_bh);
7169 if (ret < 0) {
7170 mlog_errno(ret);
7171 goto out_unlock;
7172 }
7173
7174 ret = ocfs2_reflink_xattr_in_block(&args, blk_bh);
7175 if (ret)
7176 mlog_errno(ret);
7177
7178 brelse(blk_bh);
7179
7180out_unlock:
7181 ocfs2_unlock_refcount_tree(OCFS2_SB(old_inode->i_sb),
7182 ref_tree, 1);
7183 brelse(ref_root_bh);
7184
7185 if (ocfs2_dealloc_has_cluster(&dealloc)) {
7186 ocfs2_schedule_truncate_log_flush(OCFS2_SB(old_inode->i_sb), 1);
7187 ocfs2_run_deallocs(OCFS2_SB(old_inode->i_sb), &dealloc);
7188 }
7189
7190out:
7191 return ret;
7192}
7193
7194/*
Tao Ma0fe9b662009-08-18 11:47:56 +08007195 * Initialize security and acl for an already created inode.
7196 * Used when reflinking a file without preserving security.
7197 *
7198 * It uses common APIs like ocfs2_xattr_set, so the caller
7199 * must not hold any lock except i_mutex.
7200 */
7201int ocfs2_init_security_and_acl(struct inode *dir,
Eric Paris2a7dba32011-02-01 11:05:39 -05007202 struct inode *inode,
Christoph Hellwig702e5bc2013-12-20 05:16:48 -08007203 const struct qstr *qstr,
7204 struct posix_acl *default_acl,
7205 struct posix_acl *acl)
Tao Ma0fe9b662009-08-18 11:47:56 +08007206{
Tao Ma0fe9b662009-08-18 11:47:56 +08007207 struct buffer_head *dir_bh = NULL;
Christoph Hellwig702e5bc2013-12-20 05:16:48 -08007208 int ret = 0;
Tao Ma0fe9b662009-08-18 11:47:56 +08007209
Mimi Zohar9d8f13b2011-06-06 15:29:25 -04007210 ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
Jeff Liu32918dd2013-02-27 17:02:48 -08007211 if (ret) {
Tao Ma0fe9b662009-08-18 11:47:56 +08007212 mlog_errno(ret);
7213 goto leave;
7214 }
7215
7216 ret = ocfs2_inode_lock(dir, &dir_bh, 0);
7217 if (ret) {
7218 mlog_errno(ret);
7219 goto leave;
7220 }
7221
Christoph Hellwig702e5bc2013-12-20 05:16:48 -08007222 if (!ret && default_acl)
7223 ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
7224 if (!ret && acl)
7225 ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS);
Tao Ma0fe9b662009-08-18 11:47:56 +08007226
7227 ocfs2_inode_unlock(dir, 0);
7228 brelse(dir_bh);
7229leave:
7230 return ret;
7231}
7232/*
Tiger Yang923f7f32008-11-14 11:16:27 +08007233 * 'security' attributes support
7234 */
Christoph Hellwig431547b2009-11-13 09:52:56 +00007235static size_t ocfs2_xattr_security_list(struct dentry *dentry, char *list,
Tiger Yang923f7f32008-11-14 11:16:27 +08007236 size_t list_size, const char *name,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007237 size_t name_len, int type)
Tiger Yang923f7f32008-11-14 11:16:27 +08007238{
7239 const size_t prefix_len = XATTR_SECURITY_PREFIX_LEN;
7240 const size_t total_len = prefix_len + name_len + 1;
7241
7242 if (list && total_len <= list_size) {
7243 memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
7244 memcpy(list + prefix_len, name, name_len);
7245 list[prefix_len + name_len] = '\0';
7246 }
7247 return total_len;
7248}
7249
Christoph Hellwig431547b2009-11-13 09:52:56 +00007250static int ocfs2_xattr_security_get(struct dentry *dentry, const char *name,
7251 void *buffer, size_t size, int type)
Tiger Yang923f7f32008-11-14 11:16:27 +08007252{
7253 if (strcmp(name, "") == 0)
7254 return -EINVAL;
David Howells2b0143b2015-03-17 22:25:59 +00007255 return ocfs2_xattr_get(d_inode(dentry), OCFS2_XATTR_INDEX_SECURITY,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007256 name, buffer, size);
Tiger Yang923f7f32008-11-14 11:16:27 +08007257}
7258
Christoph Hellwig431547b2009-11-13 09:52:56 +00007259static int ocfs2_xattr_security_set(struct dentry *dentry, const char *name,
7260 const void *value, size_t size, int flags, int type)
Tiger Yang923f7f32008-11-14 11:16:27 +08007261{
7262 if (strcmp(name, "") == 0)
7263 return -EINVAL;
7264
David Howells2b0143b2015-03-17 22:25:59 +00007265 return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_SECURITY,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007266 name, value, size, flags);
Tiger Yang923f7f32008-11-14 11:16:27 +08007267}
7268
Joseph Qib519ea62015-06-24 16:55:34 -07007269static int ocfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
Mimi Zohar9d8f13b2011-06-06 15:29:25 -04007270 void *fs_info)
7271{
7272 const struct xattr *xattr;
7273 int err = 0;
7274
7275 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
7276 err = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
7277 xattr->name, xattr->value,
7278 xattr->value_len, XATTR_CREATE);
7279 if (err)
7280 break;
7281 }
7282 return err;
7283}
7284
Tiger Yang534eadd2008-11-14 11:16:41 +08007285int ocfs2_init_security_get(struct inode *inode,
7286 struct inode *dir,
Eric Paris2a7dba32011-02-01 11:05:39 -05007287 const struct qstr *qstr,
Tiger Yang534eadd2008-11-14 11:16:41 +08007288 struct ocfs2_security_xattr_info *si)
7289{
Tiger Yang38d59ef2008-12-17 10:22:56 +08007290 /* check whether ocfs2 supports the xattr feature */
7291 if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb)))
7292 return -EOPNOTSUPP;
Mimi Zohar9d8f13b2011-06-06 15:29:25 -04007293 if (si)
7294 return security_old_inode_init_security(inode, dir, qstr,
7295 &si->name, &si->value,
7296 &si->value_len);
7297
7298 return security_inode_init_security(inode, dir, qstr,
7299 &ocfs2_initxattrs, NULL);
Tiger Yang534eadd2008-11-14 11:16:41 +08007300}
7301
7302int ocfs2_init_security_set(handle_t *handle,
7303 struct inode *inode,
7304 struct buffer_head *di_bh,
7305 struct ocfs2_security_xattr_info *si,
7306 struct ocfs2_alloc_context *xattr_ac,
7307 struct ocfs2_alloc_context *data_ac)
7308{
7309 return ocfs2_xattr_set_handle(handle, inode, di_bh,
7310 OCFS2_XATTR_INDEX_SECURITY,
7311 si->name, si->value, si->value_len, 0,
7312 xattr_ac, data_ac);
7313}
7314
Stephen Hemminger537d81c2010-05-13 17:53:22 -07007315const struct xattr_handler ocfs2_xattr_security_handler = {
Tiger Yang923f7f32008-11-14 11:16:27 +08007316 .prefix = XATTR_SECURITY_PREFIX,
7317 .list = ocfs2_xattr_security_list,
7318 .get = ocfs2_xattr_security_get,
7319 .set = ocfs2_xattr_security_set,
7320};
7321
7322/*
Mark Fasheh99219ae2008-10-07 14:52:59 -07007323 * 'trusted' attributes support
7324 */
Christoph Hellwig431547b2009-11-13 09:52:56 +00007325static size_t ocfs2_xattr_trusted_list(struct dentry *dentry, char *list,
Mark Fasheh99219ae2008-10-07 14:52:59 -07007326 size_t list_size, const char *name,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007327 size_t name_len, int type)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007328{
Tiger Yangceb1eba2008-10-23 16:34:13 +08007329 const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN;
Mark Fasheh99219ae2008-10-07 14:52:59 -07007330 const size_t total_len = prefix_len + name_len + 1;
7331
Sanidhya Kashyap0f5e7b42015-09-04 15:44:08 -07007332 if (!capable(CAP_SYS_ADMIN))
7333 return 0;
7334
Mark Fasheh99219ae2008-10-07 14:52:59 -07007335 if (list && total_len <= list_size) {
7336 memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
7337 memcpy(list + prefix_len, name, name_len);
7338 list[prefix_len + name_len] = '\0';
7339 }
7340 return total_len;
7341}
7342
Christoph Hellwig431547b2009-11-13 09:52:56 +00007343static int ocfs2_xattr_trusted_get(struct dentry *dentry, const char *name,
7344 void *buffer, size_t size, int type)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007345{
7346 if (strcmp(name, "") == 0)
7347 return -EINVAL;
David Howells2b0143b2015-03-17 22:25:59 +00007348 return ocfs2_xattr_get(d_inode(dentry), OCFS2_XATTR_INDEX_TRUSTED,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007349 name, buffer, size);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007350}
7351
Christoph Hellwig431547b2009-11-13 09:52:56 +00007352static int ocfs2_xattr_trusted_set(struct dentry *dentry, const char *name,
7353 const void *value, size_t size, int flags, int type)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007354{
7355 if (strcmp(name, "") == 0)
7356 return -EINVAL;
7357
David Howells2b0143b2015-03-17 22:25:59 +00007358 return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_TRUSTED,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007359 name, value, size, flags);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007360}
7361
Stephen Hemminger537d81c2010-05-13 17:53:22 -07007362const struct xattr_handler ocfs2_xattr_trusted_handler = {
Mark Fasheh99219ae2008-10-07 14:52:59 -07007363 .prefix = XATTR_TRUSTED_PREFIX,
7364 .list = ocfs2_xattr_trusted_list,
7365 .get = ocfs2_xattr_trusted_get,
7366 .set = ocfs2_xattr_trusted_set,
7367};
7368
Mark Fasheh99219ae2008-10-07 14:52:59 -07007369/*
7370 * 'user' attributes support
7371 */
Christoph Hellwig431547b2009-11-13 09:52:56 +00007372static size_t ocfs2_xattr_user_list(struct dentry *dentry, char *list,
Mark Fasheh99219ae2008-10-07 14:52:59 -07007373 size_t list_size, const char *name,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007374 size_t name_len, int type)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007375{
Tiger Yangceb1eba2008-10-23 16:34:13 +08007376 const size_t prefix_len = XATTR_USER_PREFIX_LEN;
Mark Fasheh99219ae2008-10-07 14:52:59 -07007377 const size_t total_len = prefix_len + name_len + 1;
Christoph Hellwig431547b2009-11-13 09:52:56 +00007378 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007379
7380 if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
7381 return 0;
7382
7383 if (list && total_len <= list_size) {
7384 memcpy(list, XATTR_USER_PREFIX, prefix_len);
7385 memcpy(list + prefix_len, name, name_len);
7386 list[prefix_len + name_len] = '\0';
7387 }
7388 return total_len;
7389}
7390
Christoph Hellwig431547b2009-11-13 09:52:56 +00007391static int ocfs2_xattr_user_get(struct dentry *dentry, const char *name,
7392 void *buffer, size_t size, int type)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007393{
Christoph Hellwig431547b2009-11-13 09:52:56 +00007394 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007395
7396 if (strcmp(name, "") == 0)
7397 return -EINVAL;
7398 if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
7399 return -EOPNOTSUPP;
David Howells2b0143b2015-03-17 22:25:59 +00007400 return ocfs2_xattr_get(d_inode(dentry), OCFS2_XATTR_INDEX_USER, name,
Mark Fasheh99219ae2008-10-07 14:52:59 -07007401 buffer, size);
7402}
7403
Christoph Hellwig431547b2009-11-13 09:52:56 +00007404static int ocfs2_xattr_user_set(struct dentry *dentry, const char *name,
7405 const void *value, size_t size, int flags, int type)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007406{
Christoph Hellwig431547b2009-11-13 09:52:56 +00007407 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007408
7409 if (strcmp(name, "") == 0)
7410 return -EINVAL;
7411 if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
7412 return -EOPNOTSUPP;
7413
David Howells2b0143b2015-03-17 22:25:59 +00007414 return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_USER,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007415 name, value, size, flags);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007416}
7417
Stephen Hemminger537d81c2010-05-13 17:53:22 -07007418const struct xattr_handler ocfs2_xattr_user_handler = {
Mark Fasheh99219ae2008-10-07 14:52:59 -07007419 .prefix = XATTR_USER_PREFIX,
7420 .list = ocfs2_xattr_user_list,
7421 .get = ocfs2_xattr_user_get,
7422 .set = ocfs2_xattr_user_set,
7423};