// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/log2.h>
#include <linux/iversion.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_attr_sf.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include "xfs_dir2_priv.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *);
STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);

/*
 * Helper function to extract the extent size hint from an inode.
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract the CoW extent size hint from an inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_d.di_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}

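/*
 * A worked example (values are illustrative, not from any real inode):
 * with XFS_DIFLAG2_COWEXTSIZE set and di_cowextsize = 32 blocks but an
 * extent size hint of 128 blocks, the CoW hint is max(32, 128) = 128.
 * With neither hint set, both a and b are zero and the function falls
 * back to XFS_DEFAULT_COWEXTSZ_HINT.
 */
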
/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock
 * the inode solely for reading the extents.  The reason these places can't
 * just call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also
 * guards the reading in of the extents from disk for a file in b-tree
 * format.  If the inode is in b-tree format, then we need to lock the inode
 * exclusively until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.  What we do
 * instead is check to see if the extents have been read in yet, and only
 * lock the inode exclusively if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

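/*
 * A minimal usage sketch (hypothetical caller): the returned lock mode
 * must be passed back to xfs_iunlock() because the wrapper may have
 * taken either the shared or the exclusive lock:
 *
 *	uint lock_mode = xfs_ilock_data_map_shared(ip);
 *	// ... read the data fork extent list ...
 *	xfs_iunlock(ip, lock_mode);
 */
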
/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained
 * first, the mmap lock second and the ilock last in order to prevent
 * deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> i_mmap_lock -> page_lock -> i_lock
 *
 * mmap_sem locking order:
 *
 * i_rwsem -> page_lock -> mmap_sem
 * mmap_sem -> i_mmap_lock -> page_lock
 *
 * The difference in mmap_sem locking order means that we cannot hold the
 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths
 * can fault in pages during copy in/out (for buffered IO) or require the
 * mmap_sem in get_user_pages() to map the user pages into the kernel
 * address space for direct IO. Similarly the i_rwsem cannot be taken inside
 * a page fault because page faults already hold the mmap_sem.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need
 * to take both the i_rwsem and the i_mmap_lock. These locks should *only*
 * be both taken in places where we need to invalidate the page cache in a
 * race free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				 XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}

/*
 * This is just like xfs_ilock(), except that the caller is guaranteed
 * not to sleep.  It returns 1 if it gets the requested locks and 0
 * otherwise.  If the IO lock is obtained but the inode lock cannot be,
 * then the IO lock is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be locked.
 *	 See the comment for xfs_ilock() for a list of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_mmaplock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_mmaplock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}

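/*
 * A minimal sketch of a hypothetical caller: opportunistically try the
 * lock, falling back to the blocking variant if it is contended:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *		xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	// ... modify the inode ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */
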
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be unlocked.
 *	 See the comment for xfs_ilock() for a list of valid values for
 *	 this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrdemote(&ip->i_mmaplock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
			return !!ip->i_mmaplock.mr_writer;
		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !debug_locks ||
				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
	}

	ASSERT(0);
	return 0;
}
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set.  And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set.  Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value.  This can be called for any type of inode lock combination, including
 * parent locking.  Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	int	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}

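/*
 * For illustration: xfs_lock_inumorder(XFS_ILOCK_EXCL, 2) returns
 * XFS_ILOCK_EXCL with the value 2 shifted into the XFS_ILOCK_SHIFT
 * bits of XFS_LOCK_SUBCLASS_MASK, so lockdep sees each inode in an
 * ordered multi-inode lock sequence as a distinct lock class.
 */
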
/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate).  This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time.  If
 * we lock more than one at a time, lockdep will report false positives saying
 * we have violated locking orders.
 */
static void
xfs_lock_inodes(
	xfs_inode_t	**ips,
	int		inodes,
	uint		lock_mode)
{
	int		attempts = 0, i, j, try_lock;
	xfs_log_item_t	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

	try_lock = 0;
	i = 0;
again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = (xfs_log_item_t *)ips[j]->i_itemp;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks.  If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		i = 0;
		try_lock = 0;
		goto again;
	}
}

/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 * the mmaplock or the ilock, but not more than one type at a time.  If we lock
 * more than one at a time, lockdep will report false positives saying we have
 * violated locking orders.  The iolock must be double-locked separately since
 * we use i_rwsem for that.  We now support taking one lock EXCL and the other
 * SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	struct xfs_inode	*temp;
	uint			mode_temp;
	int			attempts = 0;
	xfs_log_item_t		*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));

	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
		mode_temp = ip0_mode;
		ip0_mode = ip1_mode;
		ip1_mode = mode_temp;
	}

 again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock.  If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = (xfs_log_item_t *)ip0->i_itemp;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}

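/*
 * A hypothetical caller locking a directory and a child inode for a
 * metadata update (the function sorts by i_ino internally, so the
 * arguments may be passed in either order):
 *
 *	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
 *	// ... modify both inodes under one transaction ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_iunlock(dp, XFS_ILOCK_EXCL);
 */
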
void
__xfs_iflock(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wq_entry);
}

STATIC uint
_xfs_dic2xflags(
	uint16_t		di_flags,
	uint64_t		di_flags2,
	bool			has_attr)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (di_flags2 & XFS_DIFLAG2_ANY) {
		if (di_flags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (has_attr)
		flags |= FS_XFLAG_HASATTR;

	return flags;
}

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	struct xfs_icdinode	*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
}

/*
 * Looks up an inode from "name".  If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match.  If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}

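/*
 * A minimal sketch of a hypothetical case-insensitive lookup: on a CI
 * match, ci_name.name holds the on-disk name and must be freed by the
 * caller; on an exact match it is set to NULL, as noted above:
 *
 *	struct xfs_name ci_name;
 *	error = xfs_lookup(dp, &xname, &ip, &ci_name);
 *	if (!error && ci_name.name)
 *		kmem_free(ci_name.name);
 */
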
/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
static int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_nlink_t	nlink,
	dev_t		rdev,
	prid_t		prid,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	struct xfs_mount *mp = tp->t_mountp;
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	struct timespec64 tv;
	struct inode	*inode;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
			    ialloc_context, &ino);
	if (error)
		return error;
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Protect against obviously corrupt allocation btree records. Later
	 * xfs_iget checks will catch re-allocation of other active in-memory
	 * and on-disk inodes. If we don't catch reallocating the parent inode
	 * here we will deadlock in xfs_iget() so we have to do these checks
	 * first.
	 */
	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
		return -EFSCORRUPTED;
	}

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);
	inode = VFS_I(ip);

	/*
	 * We always convert v1 inodes to v2 now - we only support filesystems
	 * with >= v2 inode capability, so there is no reason for ever leaving
	 * an inode in v1 format.
	 */
	if (ip->i_d.di_version == 1)
		ip->i_d.di_version = 2;

	inode->i_mode = mode;
	set_nlink(inode, nlink);
	ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
	ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
	inode->i_rdev = rdev;
	xfs_set_projid(ip, prid);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (inode->i_mode & S_ISGID) &&
	    (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
		inode->i_mode &= ~S_ISGID;

	ip->i_d.di_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	tv = current_time(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;
	inode->i_ctime = tv;

	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (ip->i_d.di_version == 3) {
		inode_set_iversion(inode, 1);
		ip->i_d.di_flags2 = 0;
		ip->i_d.di_cowextsize = 0;
		ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
		ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
				if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
					di_flags |= XFS_DIFLAG_PROJINHERIT;
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;

			ip->i_d.di_flags |= di_flags;
		}
		if (pip &&
		    (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
		    pip->i_d.di_version == 3 &&
		    ip->i_d.di_version == 3) {
			uint64_t	di_flags2 = 0;

			if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
				di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
				ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
			}
			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
				di_flags2 |= XFS_DIFLAG2_DAX;

			ip->i_d.di_flags2 |= di_flags2;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = 0;
		ip->i_df.if_u1.if_root = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}

/*
 * Allocates a new inode from disk and returns a pointer to the
 * incore copy.  This routine will internally commit the current
 * transaction and allocate a new one if the Space Manager needed
 * to do an allocation to replenish the inode free-list.
 *
 * This routine is designed to be called from xfs_create and
 * xfs_create_dir.
 */
int
xfs_dir_ialloc(
	xfs_trans_t	**tpp,		/* input: current transaction;
					   output: may be a new transaction. */
	xfs_inode_t	*dp,		/* directory within which to allocate
					   the inode. */
	umode_t		mode,
	xfs_nlink_t	nlink,
	dev_t		rdev,
	prid_t		prid,		/* project id */
	xfs_inode_t	**ipp)		/* pointer to inode; it will be
					   locked. */
{
	xfs_trans_t	*tp;
	xfs_inode_t	*ip;
	xfs_buf_t	*ialloc_context = NULL;
	int		code;
	void		*dqinfo;
	uint		tflags;

	tp = *tpp;
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

	/*
	 * xfs_ialloc will return a pointer to an incore inode if
	 * the Space Manager has an available inode on the free
	 * list. Otherwise, it will do an allocation and replenish
	 * the freelist.  Since we can only do one allocation per
	 * transaction without deadlocks, we will need to commit the
	 * current transaction and start a new one.  We will then
	 * need to call xfs_ialloc again to get the inode.
	 *
	 * If xfs_ialloc did an allocation to replenish the freelist,
	 * it returns the bp containing the head of the freelist as
	 * ialloc_context. We will hold a lock on it across the
	 * transaction commit so that no other process can steal
	 * the inode(s) that we've just allocated.
	 */
	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
			&ip);

	/*
	 * Return an error if we were unable to allocate a new inode.
	 * This should only happen if we run out of space on disk or
	 * encounter a disk error.
	 */
	if (code) {
		*ipp = NULL;
		return code;
	}
	if (!ialloc_context && !ip) {
		*ipp = NULL;
		return -ENOSPC;
	}

	/*
	 * If the AGI buffer is non-NULL, then we were unable to get an
	 * inode in one operation.  We need to commit the current
	 * transaction and call xfs_ialloc() again.  It is guaranteed
	 * to succeed the second time.
	 */
	if (ialloc_context) {
		/*
		 * Normally, xfs_trans_commit releases all the locks.
		 * We call bhold to hang on to the ialloc_context across
		 * the commit.  Holding this buffer prevents any other
		 * processes from doing any allocations in this
		 * allocation group.
		 */
		xfs_trans_bhold(tp, ialloc_context);

		/*
		 * We want the quota changes to be associated with the next
		 * transaction, NOT this one. So, detach the dqinfo from this
		 * and attach it to the next transaction.
		 */
		dqinfo = NULL;
		tflags = 0;
		if (tp->t_dqinfo) {
			dqinfo = (void *)tp->t_dqinfo;
			tp->t_dqinfo = NULL;
			tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
		}

		code = xfs_trans_roll(&tp);

		/*
		 * Re-attach the quota info that we detached from prev trx.
		 */
		if (dqinfo) {
			tp->t_dqinfo = dqinfo;
			tp->t_flags |= tflags;
		}

		if (code) {
			xfs_buf_relse(ialloc_context);
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		xfs_trans_bjoin(tp, ialloc_context);

		/*
		 * Call ialloc again. Since we've locked out all
		 * other allocations in this allocation group,
		 * this call should always succeed.
		 */
		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
				  &ialloc_context, &ip);

		/*
		 * If we get an error at this point, return to the caller
		 * so that the current transaction can be aborted.
		 */
		if (code) {
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		ASSERT(!ialloc_context && ip);
	}

	*ipp = ip;
	*tpp = tp;

	return 0;
}

/*
 * Decrement the link count on an inode & log the change.  If this causes the
 * link count to go to zero, move the inode to AGI unlinked list so that it can
 * be freed when the last active reference goes away via xfs_inactive().
 */
static int			/* error */
xfs_droplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
static int
xfs_bumplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	ASSERT(ip->i_d.di_version > 1);
	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	return 0;
}

Dave Chinnerc24b5df2013-08-12 20:49:45 +10001132int
1133xfs_create(
1134 xfs_inode_t *dp,
1135 struct xfs_name *name,
1136 umode_t mode,
Christoph Hellwig66f36462017-10-19 11:07:09 -07001137 dev_t rdev,
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001138 xfs_inode_t **ipp)
1139{
1140 int is_dir = S_ISDIR(mode);
1141 struct xfs_mount *mp = dp->i_mount;
1142 struct xfs_inode *ip = NULL;
1143 struct xfs_trans *tp = NULL;
1144 int error;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001145 bool unlock_dp_on_error = false;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001146 prid_t prid;
1147 struct xfs_dquot *udqp = NULL;
1148 struct xfs_dquot *gdqp = NULL;
1149 struct xfs_dquot *pdqp = NULL;
Brian Foster062647a2014-11-28 14:00:16 +11001150 struct xfs_trans_res *tres;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001151 uint resblks;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001152
1153 trace_xfs_create(dp, name);
1154
1155 if (XFS_FORCED_SHUTDOWN(mp))
Dave Chinner24513372014-06-25 14:58:08 +10001156 return -EIO;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001157
Zhi Yong Wu163467d2013-12-18 08:22:39 +08001158 prid = xfs_get_initial_prid(dp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001159
1160 /*
1161 * Make sure that we have allocated dquot(s) on disk.
1162 */
Dwight Engen7aab1b22013-08-15 14:08:01 -04001163 error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1164 xfs_kgid_to_gid(current_fsgid()), prid,
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001165 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1166 &udqp, &gdqp, &pdqp);
1167 if (error)
1168 return error;
1169
1170 if (is_dir) {
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001171 resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
Brian Foster062647a2014-11-28 14:00:16 +11001172 tres = &M_RES(mp)->tr_mkdir;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001173 } else {
1174 resblks = XFS_CREATE_SPACE_RES(mp, name->len);
Brian Foster062647a2014-11-28 14:00:16 +11001175 tres = &M_RES(mp)->tr_create;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001176 }
1177
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001178 /*
1179 * Initially assume that the file does not exist and
1180 * reserve the resources for that case. If that is not
1181 * the case we'll drop the one we have and get a more
1182 * appropriate transaction later.
1183 */
Christoph Hellwig253f4912016-04-06 09:19:55 +10001184 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
Dave Chinner24513372014-06-25 14:58:08 +10001185 if (error == -ENOSPC) {
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001186 /* flush outstanding delalloc blocks and retry */
1187 xfs_flush_inodes(mp);
Christoph Hellwig253f4912016-04-06 09:19:55 +10001188 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001189 }
Christoph Hellwig4906e212015-06-04 13:47:56 +10001190 if (error)
Christoph Hellwig253f4912016-04-06 09:19:55 +10001191 goto out_release_inode;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001192
Christoph Hellwig65523212016-11-30 14:33:25 +11001193 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001194 unlock_dp_on_error = true;
1195
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001196 /*
1197 * Reserve disk quota and the inode.
1198 */
1199 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1200 pdqp, resblks, 1, 0);
1201 if (error)
1202 goto out_trans_cancel;
1203
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001204 /*
1205 * A newly created regular or special file just has one directory
1206 * entry pointing to them, but a directory also the "." entry
1207 * pointing to itself.
1208 */
Chandan Rajendrac9590252018-04-02 15:47:43 -07001209 error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
Jan Karad6077aa2015-07-29 11:52:08 +10001210 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10001211 goto out_trans_cancel;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001212
1213 /*
1214 * Now we join the directory inode to the transaction. We do not do it
1215 * earlier because xfs_dir_ialloc might commit the previous transaction
1216 * (and release all the locks). An error from here on will result in
1217 * the transaction cancel unlocking dp so don't do it explicitly in the
1218 * error path.
1219 */
Christoph Hellwig65523212016-11-30 14:33:25 +11001220 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001221 unlock_dp_on_error = false;
1222
Brian Foster381eee62018-07-11 22:26:21 -07001223 error = xfs_dir_createname(tp, dp, name, ip->i_ino,
Brian Fosterc9cfdb32018-07-11 22:26:08 -07001224 resblks ?
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001225 resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
1226 if (error) {
Dave Chinner24513372014-06-25 14:58:08 +10001227 ASSERT(error != -ENOSPC);
Christoph Hellwig4906e212015-06-04 13:47:56 +10001228 goto out_trans_cancel;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001229 }
1230 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1231 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1232
1233 if (is_dir) {
1234 error = xfs_dir_init(tp, ip, dp);
1235 if (error)
Brian Fosterc8eac492018-07-24 13:43:13 -07001236 goto out_trans_cancel;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001237
1238 error = xfs_bumplink(tp, dp);
1239 if (error)
Brian Fosterc8eac492018-07-24 13:43:13 -07001240 goto out_trans_cancel;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001241 }
1242
1243 /*
1244 * If this is a synchronous mount, make sure that the
1245 * create transaction goes to disk before returning to
1246 * the user.
1247 */
1248 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1249 xfs_trans_set_sync(tp);
1250
1251 /*
 1252	 * Attach the dquot(s) to the inode and modify them incore.
 1253	 * The ids of the inode couldn't have changed since the new
1254 * inode has been locked ever since it was created.
1255 */
1256 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1257
Christoph Hellwig70393312015-06-04 13:48:08 +10001258 error = xfs_trans_commit(tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001259 if (error)
1260 goto out_release_inode;
1261
1262 xfs_qm_dqrele(udqp);
1263 xfs_qm_dqrele(gdqp);
1264 xfs_qm_dqrele(pdqp);
1265
1266 *ipp = ip;
1267 return 0;
1268
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001269 out_trans_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10001270 xfs_trans_cancel(tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001271 out_release_inode:
1272 /*
Dave Chinner58c90472015-02-23 22:38:08 +11001273 * Wait until after the current transaction is aborted to finish the
1274 * setup of the inode and release the inode. This prevents recursive
1275 * transactions and deadlocks from xfs_inactive.
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001276 */
Dave Chinner58c90472015-02-23 22:38:08 +11001277 if (ip) {
1278 xfs_finish_inode_setup(ip);
Darrick J. Wong44a87362018-07-25 12:52:32 -07001279 xfs_irele(ip);
Dave Chinner58c90472015-02-23 22:38:08 +11001280 }
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001281
1282 xfs_qm_dqrele(udqp);
1283 xfs_qm_dqrele(gdqp);
1284 xfs_qm_dqrele(pdqp);
1285
1286 if (unlock_dp_on_error)
Christoph Hellwig65523212016-11-30 14:33:25 +11001287 xfs_iunlock(dp, XFS_ILOCK_EXCL);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001288 return error;
1289}
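/*
 * Illustrative call sequence (a sketch, not a verbatim caller; the names
 * below live in xfs_iops.c): the VFS ->create/->mkdir/->mknod paths reach
 * this function roughly as
 *
 *	error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
 *	if (!error)
 *		instantiate the dentry with VFS_I(ip);
 *
 * where "name" is the xfs_name built from the dentry. On failure all of
 * the reservations taken above (transaction, quota, inode) have already
 * been unwound, so the caller only propagates the error.
 */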
1290
1291int
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001292xfs_create_tmpfile(
1293 struct xfs_inode *dp,
Brian Foster330033d2014-04-17 08:15:30 +10001294 umode_t mode,
1295 struct xfs_inode **ipp)
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001296{
1297 struct xfs_mount *mp = dp->i_mount;
1298 struct xfs_inode *ip = NULL;
1299 struct xfs_trans *tp = NULL;
1300 int error;
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001301 prid_t prid;
1302 struct xfs_dquot *udqp = NULL;
1303 struct xfs_dquot *gdqp = NULL;
1304 struct xfs_dquot *pdqp = NULL;
1305 struct xfs_trans_res *tres;
1306 uint resblks;
1307
1308 if (XFS_FORCED_SHUTDOWN(mp))
Dave Chinner24513372014-06-25 14:58:08 +10001309 return -EIO;
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001310
1311 prid = xfs_get_initial_prid(dp);
1312
1313 /*
1314 * Make sure that we have allocated dquot(s) on disk.
1315 */
1316 error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1317 xfs_kgid_to_gid(current_fsgid()), prid,
1318 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1319 &udqp, &gdqp, &pdqp);
1320 if (error)
1321 return error;
1322
1323 resblks = XFS_IALLOC_SPACE_RES(mp);
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001324 tres = &M_RES(mp)->tr_create_tmpfile;
Christoph Hellwig253f4912016-04-06 09:19:55 +10001325
1326 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
Christoph Hellwig4906e212015-06-04 13:47:56 +10001327 if (error)
Christoph Hellwig253f4912016-04-06 09:19:55 +10001328 goto out_release_inode;
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001329
1330 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1331 pdqp, resblks, 1, 0);
1332 if (error)
1333 goto out_trans_cancel;
1334
Darrick J. Wongc4a6bf72019-02-13 11:15:17 -08001335 error = xfs_dir_ialloc(&tp, dp, mode, 0, 0, prid, &ip);
Jan Karad6077aa2015-07-29 11:52:08 +10001336 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10001337 goto out_trans_cancel;
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001338
1339 if (mp->m_flags & XFS_MOUNT_WSYNC)
1340 xfs_trans_set_sync(tp);
1341
1342 /*
 1343	 * Attach the dquot(s) to the inode and modify them incore.
 1344	 * The ids of the inode couldn't have changed since the new
1345 * inode has been locked ever since it was created.
1346 */
1347 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1348
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001349 error = xfs_iunlink(tp, ip);
1350 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10001351 goto out_trans_cancel;
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001352
Christoph Hellwig70393312015-06-04 13:48:08 +10001353 error = xfs_trans_commit(tp);
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001354 if (error)
1355 goto out_release_inode;
1356
1357 xfs_qm_dqrele(udqp);
1358 xfs_qm_dqrele(gdqp);
1359 xfs_qm_dqrele(pdqp);
1360
Brian Foster330033d2014-04-17 08:15:30 +10001361 *ipp = ip;
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001362 return 0;
1363
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001364 out_trans_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10001365 xfs_trans_cancel(tp);
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001366 out_release_inode:
1367 /*
Dave Chinner58c90472015-02-23 22:38:08 +11001368 * Wait until after the current transaction is aborted to finish the
1369 * setup of the inode and release the inode. This prevents recursive
1370 * transactions and deadlocks from xfs_inactive.
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001371 */
Dave Chinner58c90472015-02-23 22:38:08 +11001372 if (ip) {
1373 xfs_finish_inode_setup(ip);
Darrick J. Wong44a87362018-07-25 12:52:32 -07001374 xfs_irele(ip);
Dave Chinner58c90472015-02-23 22:38:08 +11001375 }
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001376
1377 xfs_qm_dqrele(udqp);
1378 xfs_qm_dqrele(gdqp);
1379 xfs_qm_dqrele(pdqp);
1380
1381 return error;
1382}
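/*
 * Usage sketch (hypothetical userspace, for orientation):
 *
 *	fd = open("/mnt/dir", O_TMPFILE | O_WRONLY, 0600);
 *
 * lands here. The new inode starts with nlink == 0 and is reachable only
 * through the AGI unlinked list (via xfs_iunlink() above), so a crash or
 * the final close simply frees it. Making it permanent later with
 * linkat(2) goes through xfs_link(), which takes it back off the
 * unlinked list.
 */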
1383
1384int
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001385xfs_link(
1386 xfs_inode_t *tdp,
1387 xfs_inode_t *sip,
1388 struct xfs_name *target_name)
1389{
1390 xfs_mount_t *mp = tdp->i_mount;
1391 xfs_trans_t *tp;
1392 int error;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001393 int resblks;
1394
1395 trace_xfs_link(tdp, target_name);
1396
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001397 ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001398
1399 if (XFS_FORCED_SHUTDOWN(mp))
Dave Chinner24513372014-06-25 14:58:08 +10001400 return -EIO;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001401
Darrick J. Wongc14cfcc2018-05-04 15:30:21 -07001402 error = xfs_qm_dqattach(sip);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001403 if (error)
1404 goto std_return;
1405
Darrick J. Wongc14cfcc2018-05-04 15:30:21 -07001406 error = xfs_qm_dqattach(tdp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001407 if (error)
1408 goto std_return;
1409
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001410 resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
Christoph Hellwig253f4912016-04-06 09:19:55 +10001411 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
Dave Chinner24513372014-06-25 14:58:08 +10001412 if (error == -ENOSPC) {
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001413 resblks = 0;
Christoph Hellwig253f4912016-04-06 09:19:55 +10001414 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001415 }
Christoph Hellwig4906e212015-06-04 13:47:56 +10001416 if (error)
Christoph Hellwig253f4912016-04-06 09:19:55 +10001417 goto std_return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001418
Darrick J. Wong7c2d2382018-01-26 15:27:33 -08001419 xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001420
1421 xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
Christoph Hellwig65523212016-11-30 14:33:25 +11001422 xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001423
1424 /*
1425 * If we are using project inheritance, we only allow hard link
1426 * creation in our tree when the project IDs are the same; else
1427 * the tree quota mechanism could be circumvented.
1428 */
1429 if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1430 (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
Dave Chinner24513372014-06-25 14:58:08 +10001431 error = -EXDEV;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001432 goto error_return;
1433 }
1434
Eric Sandeen94f3cad2014-09-09 11:57:52 +10001435 if (!resblks) {
1436 error = xfs_dir_canenter(tp, tdp, target_name);
1437 if (error)
1438 goto error_return;
1439 }
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001440
Dave Chinner54d7b5c2016-02-09 16:54:58 +11001441 /*
1442 * Handle initial link state of O_TMPFILE inode
1443 */
1444 if (VFS_I(sip)->i_nlink == 0) {
Zhi Yong Wuab297432013-12-18 08:22:41 +08001445 error = xfs_iunlink_remove(tp, sip);
1446 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10001447 goto error_return;
Zhi Yong Wuab297432013-12-18 08:22:41 +08001448 }
1449
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001450 error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
Brian Foster381eee62018-07-11 22:26:21 -07001451 resblks);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001452 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10001453 goto error_return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001454 xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1455 xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1456
1457 error = xfs_bumplink(tp, sip);
1458 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10001459 goto error_return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001460
1461 /*
1462 * If this is a synchronous mount, make sure that the
1463 * link transaction goes to disk before returning to
1464 * the user.
1465 */
Eric Sandeenf6106ef2016-01-11 11:34:01 +11001466 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001467 xfs_trans_set_sync(tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001468
Christoph Hellwig70393312015-06-04 13:48:08 +10001469 return xfs_trans_commit(tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001470
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001471 error_return:
Christoph Hellwig4906e212015-06-04 13:47:56 +10001472 xfs_trans_cancel(tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001473 std_return:
1474 return error;
1475}
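/*
 * Example of the PROJINHERIT check above (illustrative project IDs, not
 * from the sources): if /mnt/proj has XFS_DIFLAG_PROJINHERIT set with
 * project ID 42, then link("/mnt/other/file", "/mnt/proj/l") fails with
 * -EXDEV whenever "file" carries a different project ID, because the
 * hard link would let its blocks escape the directory tree quota.
 */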
1476
Darrick J. Wong363e59b2017-12-14 15:42:59 -08001477/* Clear the reflink flag and the cowblocks tag if possible. */
1478static void
1479xfs_itruncate_clear_reflink_flags(
1480 struct xfs_inode *ip)
1481{
1482 struct xfs_ifork *dfork;
1483 struct xfs_ifork *cfork;
1484
1485 if (!xfs_is_reflink_inode(ip))
1486 return;
1487 dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1488 cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1489 if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1490 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1491 if (cfork->if_bytes == 0)
1492 xfs_inode_clear_cowblocks_tag(ip);
1493}
1494
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495/*
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001496 * Free up the underlying blocks past new_size. The new size must be smaller
1497 * than the current size. This routine can be used both for the attribute and
1498 * data fork, and does not modify the inode size, which is left to the caller.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 *
David Chinnerf6485052008-04-17 16:50:04 +10001500 * The transaction passed to this routine must have made a permanent log
1501 * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
1502 * given transaction and start new ones, so make sure everything involved in
1503 * the transaction is tidy before calling here. Some transaction will be
1504 * returned to the caller to be committed. The incoming transaction must
1505 * already include the inode, and both inode locks must be held exclusively.
1506 * The inode must also be "held" within the transaction. On return the inode
1507 * will be "held" within the returned transaction. This routine does NOT
1508 * require any disk space to be reserved for it within the transaction.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 *
David Chinnerf6485052008-04-17 16:50:04 +10001510 * If we get an error, we must return with the inode locked and linked into the
1511 * current transaction. This keeps things simple for the higher level code,
1512 * because it always knows that the inode is locked and held in the transaction
1513 * that returns to it whether errors occur or not. We don't mark the inode
1514 * dirty on error so that transactions can be easily aborted if possible.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 */
1516int
Brian Foster4e529332018-05-10 09:35:42 -07001517xfs_itruncate_extents_flags(
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001518 struct xfs_trans **tpp,
1519 struct xfs_inode *ip,
1520 int whichfork,
Brian Foster13b86fc2018-05-09 08:45:04 -07001521 xfs_fsize_t new_size,
Brian Foster4e529332018-05-10 09:35:42 -07001522 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523{
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001524 struct xfs_mount *mp = ip->i_mount;
1525 struct xfs_trans *tp = *tpp;
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001526 xfs_fileoff_t first_unmap_block;
1527 xfs_fileoff_t last_block;
1528 xfs_filblks_t unmap_len;
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001529 int error = 0;
1530 int done = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531
Christoph Hellwig0b561852012-07-04 11:13:31 -04001532 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1533 ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1534 xfs_isilocked(ip, XFS_IOLOCK_EXCL));
Christoph Hellwigce7ae1512011-12-18 20:00:11 +00001535 ASSERT(new_size <= XFS_ISIZE(ip));
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001536 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 ASSERT(ip->i_itemp != NULL);
Christoph Hellwig898621d2010-06-24 11:36:58 +10001538 ASSERT(ip->i_itemp->ili_lock_flags == 0);
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001539 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540
Christoph Hellwig673e8e52011-12-18 20:00:04 +00001541 trace_xfs_itruncate_extents_start(ip, new_size);
1542
Brian Foster4e529332018-05-10 09:35:42 -07001543 flags |= xfs_bmapi_aflag(whichfork);
Brian Foster13b86fc2018-05-09 08:45:04 -07001544
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 /*
1546 * Since it is possible for space to become allocated beyond
1547 * the end of the file (in a crash where the space is allocated
1548 * but the inode size is not yet updated), simply remove any
1549 * blocks which show up between the new EOF and the maximum
1550 * possible file size. If the first block to be removed is
 1551	 * beyond the maximum file size (i.e. it is the same as last_block),
1552 * then there is nothing to do.
1553 */
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001554 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
Dave Chinner32972382012-06-08 15:44:54 +10001555 last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001556 if (first_unmap_block == last_block)
1557 return 0;
1558
1559 ASSERT(first_unmap_block < last_block);
1560 unmap_len = last_block - first_unmap_block + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 while (!done) {
Brian Foster02dff7b2018-07-24 13:43:07 -07001562 ASSERT(tp->t_firstblock == NULLFSBLOCK);
Brian Foster13b86fc2018-05-09 08:45:04 -07001563 error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
Brian Foster2af52842018-07-11 22:26:25 -07001564 XFS_ITRUNC_MAX_EXTENTS, &done);
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001565 if (error)
Brian Fosterd5a2e282018-09-29 13:41:58 +10001566 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567
1568 /*
1569 * Duplicate the transaction that has the permanent
1570 * reservation and commit the old transaction.
1571 */
Brian Foster9e28a242018-07-24 13:43:15 -07001572 error = xfs_defer_finish(&tp);
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001573 if (error)
Brian Foster9b1f4e92018-08-01 07:20:33 -07001574 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575
Christoph Hellwig411350d2017-08-28 10:21:03 -07001576 error = xfs_trans_roll_inode(&tp, ip);
David Chinnerf6485052008-04-17 16:50:04 +10001577 if (error)
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001578 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579 }
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001580
Darrick J. Wong4919d422018-04-10 08:28:33 -07001581 if (whichfork == XFS_DATA_FORK) {
1582 /* Remove all pending CoW reservations. */
1583 error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1584 first_unmap_block, last_block, true);
1585 if (error)
1586 goto out;
Darrick J. Wongaa8968f2016-10-03 09:11:38 -07001587
Darrick J. Wong4919d422018-04-10 08:28:33 -07001588 xfs_itruncate_clear_reflink_flags(ip);
1589 }
Darrick J. Wongaa8968f2016-10-03 09:11:38 -07001590
Christoph Hellwig673e8e52011-12-18 20:00:04 +00001591 /*
1592 * Always re-log the inode so that our permanent transaction can keep
1593 * on rolling it forward in the log.
1594 */
1595 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1596
1597 trace_xfs_itruncate_extents_end(ip, new_size);
1598
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001599out:
1600 *tpp = tp;
1601 return error;
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001602}
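/*
 * Minimal caller sketch. This assumes the xfs_itruncate_extents() wrapper
 * (the flags == 0 form of the function above) and exactly the state the
 * preceding comment demands:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *
 * On return, error or not, ip is still locked and joined to *tp.
 * xfs_inactive_truncate() below is the canonical in-file user of this
 * pattern.
 */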
1603
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001604int
1605xfs_release(
1606 xfs_inode_t *ip)
1607{
1608 xfs_mount_t *mp = ip->i_mount;
1609 int error;
1610
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001611 if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001612 return 0;
1613
1614 /* If this is a read-only mount, don't do this (would generate I/O) */
1615 if (mp->m_flags & XFS_MOUNT_RDONLY)
1616 return 0;
1617
1618 if (!XFS_FORCED_SHUTDOWN(mp)) {
1619 int truncated;
1620
1621 /*
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001622 * If we previously truncated this file and removed old data
1623 * in the process, we want to initiate "early" writeout on
1624 * the last close. This is an attempt to combat the notorious
1625 * NULL files problem which is particularly noticeable from a
1626 * truncate down, buffered (re-)write (delalloc), followed by
1627 * a crash. What we are effectively doing here is
1628 * significantly reducing the time window where we'd otherwise
1629 * be exposed to that problem.
1630 */
1631 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1632 if (truncated) {
1633 xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
Dave Chinnereac152b2014-08-04 13:22:49 +10001634 if (ip->i_delayed_blks > 0) {
Dave Chinner24513372014-06-25 14:58:08 +10001635 error = filemap_flush(VFS_I(ip)->i_mapping);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001636 if (error)
1637 return error;
1638 }
1639 }
1640 }
1641
Dave Chinner54d7b5c2016-02-09 16:54:58 +11001642 if (VFS_I(ip)->i_nlink == 0)
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001643 return 0;
1644
1645 if (xfs_can_free_eofblocks(ip, false)) {
1646
1647 /*
Brian Fostera36b9262017-01-27 23:22:55 -08001648		 * If the inode is being opened, written and closed
 1649		 * frequently and we have delayed allocation blocks outstanding
 1650		 * (e.g. streaming writes from the NFS server), truncating the
 1651		 * blocks past EOF will cause fragmentation.
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001652 *
Brian Fostera36b9262017-01-27 23:22:55 -08001653 * In this case don't do the truncation, but we have to be
1654 * careful how we detect this case. Blocks beyond EOF show up as
1655 * i_delayed_blks even when the inode is clean, so we need to
1656 * truncate them away first before checking for a dirty release.
1657 * Hence on the first dirty close we will still remove the
1658 * speculative allocation, but after that we will leave it in
1659 * place.
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001660 */
1661 if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1662 return 0;
Brian Fostera36b9262017-01-27 23:22:55 -08001663 /*
1664 * If we can't get the iolock just skip truncating the blocks
1665 * past EOF because we could deadlock with the mmap_sem
1666 * otherwise. We'll get another chance to drop them once the
1667 * last reference to the inode is dropped, so we'll never leak
1668 * blocks permanently.
1669 */
1670 if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1671 error = xfs_free_eofblocks(ip);
1672 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1673 if (error)
1674 return error;
1675 }
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001676
1677 /* delalloc blocks after truncation means it really is dirty */
1678 if (ip->i_delayed_blks)
1679 xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1680 }
1681 return 0;
1682}
1683
1684/*
Brian Fosterf7be2d72013-09-20 11:06:10 -04001685 * xfs_inactive_truncate
1686 *
1687 * Called to perform a truncate when an inode becomes unlinked.
1688 */
1689STATIC int
1690xfs_inactive_truncate(
1691 struct xfs_inode *ip)
1692{
1693 struct xfs_mount *mp = ip->i_mount;
1694 struct xfs_trans *tp;
1695 int error;
1696
Christoph Hellwig253f4912016-04-06 09:19:55 +10001697 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
Brian Fosterf7be2d72013-09-20 11:06:10 -04001698 if (error) {
1699 ASSERT(XFS_FORCED_SHUTDOWN(mp));
Brian Fosterf7be2d72013-09-20 11:06:10 -04001700 return error;
1701 }
Brian Fosterf7be2d72013-09-20 11:06:10 -04001702 xfs_ilock(ip, XFS_ILOCK_EXCL);
1703 xfs_trans_ijoin(tp, ip, 0);
1704
1705 /*
1706 * Log the inode size first to prevent stale data exposure in the event
1707 * of a system crash before the truncate completes. See the related
Jan Kara69bca802016-05-26 14:46:43 +02001708 * comment in xfs_vn_setattr_size() for details.
Brian Fosterf7be2d72013-09-20 11:06:10 -04001709 */
1710 ip->i_d.di_size = 0;
1711 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1712
1713 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1714 if (error)
1715 goto error_trans_cancel;
1716
1717 ASSERT(ip->i_d.di_nextents == 0);
1718
Christoph Hellwig70393312015-06-04 13:48:08 +10001719 error = xfs_trans_commit(tp);
Brian Fosterf7be2d72013-09-20 11:06:10 -04001720 if (error)
1721 goto error_unlock;
1722
1723 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1724 return 0;
1725
1726error_trans_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10001727 xfs_trans_cancel(tp);
Brian Fosterf7be2d72013-09-20 11:06:10 -04001728error_unlock:
1729 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1730 return error;
1731}
1732
1733/*
Brian Foster88877d22013-09-20 11:06:11 -04001734 * xfs_inactive_ifree()
1735 *
1736 * Perform the inode free when an inode is unlinked.
1737 */
1738STATIC int
1739xfs_inactive_ifree(
1740 struct xfs_inode *ip)
1741{
Brian Foster88877d22013-09-20 11:06:11 -04001742 struct xfs_mount *mp = ip->i_mount;
1743 struct xfs_trans *tp;
1744 int error;
1745
Brian Foster9d43b182014-04-24 16:00:52 +10001746 /*
Christoph Hellwig76d771b2017-01-25 07:49:35 -08001747 * We try to use a per-AG reservation for any block needed by the finobt
1748 * tree, but as the finobt feature predates the per-AG reservation
1749 * support a degraded file system might not have enough space for the
1750 * reservation at mount time. In that case try to dip into the reserved
1751 * pool and pray.
Brian Foster9d43b182014-04-24 16:00:52 +10001752 *
1753 * Send a warning if the reservation does happen to fail, as the inode
1754 * now remains allocated and sits on the unlinked list until the fs is
1755 * repaired.
1756 */
Darrick J. Wonge1f6ca12019-02-14 09:33:15 -08001757 if (unlikely(mp->m_finobt_nores)) {
Christoph Hellwig76d771b2017-01-25 07:49:35 -08001758 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1759 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1760 &tp);
1761 } else {
1762 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1763 }
Brian Foster88877d22013-09-20 11:06:11 -04001764 if (error) {
Dave Chinner24513372014-06-25 14:58:08 +10001765 if (error == -ENOSPC) {
Brian Foster9d43b182014-04-24 16:00:52 +10001766 xfs_warn_ratelimited(mp,
1767 "Failed to remove inode(s) from unlinked list. "
1768 "Please free space, unmount and run xfs_repair.");
1769 } else {
1770 ASSERT(XFS_FORCED_SHUTDOWN(mp));
1771 }
Brian Foster88877d22013-09-20 11:06:11 -04001772 return error;
1773 }
1774
1775 xfs_ilock(ip, XFS_ILOCK_EXCL);
1776 xfs_trans_ijoin(tp, ip, 0);
1777
Brian Foster0e0417f2018-07-11 22:26:07 -07001778 error = xfs_ifree(tp, ip);
Brian Foster88877d22013-09-20 11:06:11 -04001779 if (error) {
1780 /*
1781 * If we fail to free the inode, shut down. The cancel
1782 * might do that, we need to make sure. Otherwise the
1783 * inode might be lost for a long time or forever.
1784 */
1785 if (!XFS_FORCED_SHUTDOWN(mp)) {
1786 xfs_notice(mp, "%s: xfs_ifree returned error %d",
1787 __func__, error);
1788 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1789 }
Christoph Hellwig4906e212015-06-04 13:47:56 +10001790 xfs_trans_cancel(tp);
Brian Foster88877d22013-09-20 11:06:11 -04001791 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1792 return error;
1793 }
1794
1795 /*
1796 * Credit the quota account(s). The inode is gone.
1797 */
1798 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1799
1800 /*
Brian Fosterd4a97a02015-08-19 10:01:40 +10001801 * Just ignore errors at this point. There is nothing we can do except
1802 * to try to keep going. Make sure it's not a silent error.
Brian Foster88877d22013-09-20 11:06:11 -04001803 */
Christoph Hellwig70393312015-06-04 13:48:08 +10001804 error = xfs_trans_commit(tp);
Brian Foster88877d22013-09-20 11:06:11 -04001805 if (error)
1806 xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
1807 __func__, error);
1808
1809 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1810 return 0;
1811}
1812
1813/*
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001814 * xfs_inactive
1815 *
 1816 * This is called when the reference count for the vnode
1817 * goes to zero. If the file has been unlinked, then it must
1818 * now be truncated. Also, we clear all of the read-ahead state
1819 * kept for the inode here since the file is now closed.
1820 */
Brian Foster74564fb2013-09-20 11:06:12 -04001821void
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001822xfs_inactive(
1823 xfs_inode_t *ip)
1824{
Jie Liu3d3c8b52013-08-12 20:49:59 +10001825 struct xfs_mount *mp;
Jie Liu3d3c8b52013-08-12 20:49:59 +10001826 int error;
1827 int truncate = 0;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001828
1829 /*
1830 * If the inode is already free, then there can be nothing
1831 * to clean up here.
1832 */
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001833 if (VFS_I(ip)->i_mode == 0) {
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001834 ASSERT(ip->i_df.if_broot_bytes == 0);
Brian Foster74564fb2013-09-20 11:06:12 -04001835 return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001836 }
1837
1838 mp = ip->i_mount;
Darrick J. Wong17c12bc2016-10-03 09:11:29 -07001839 ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001840
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001841 /* If this is a read-only mount, don't do this (would generate I/O) */
1842 if (mp->m_flags & XFS_MOUNT_RDONLY)
Brian Foster74564fb2013-09-20 11:06:12 -04001843 return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001844
Darrick J. Wong62318482018-03-06 17:08:31 -08001845 /* Try to clean out the cow blocks if there are any. */
Christoph Hellwig51d62692018-07-17 16:51:51 -07001846 if (xfs_inode_has_cow_data(ip))
Darrick J. Wong62318482018-03-06 17:08:31 -08001847 xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1848
Dave Chinner54d7b5c2016-02-09 16:54:58 +11001849 if (VFS_I(ip)->i_nlink != 0) {
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001850 /*
1851 * force is true because we are evicting an inode from the
1852 * cache. Post-eof blocks must be freed, lest we end up with
1853 * broken free space accounting.
Brian Foster3b4683c2017-04-11 10:50:05 -07001854 *
1855 * Note: don't bother with iolock here since lockdep complains
1856 * about acquiring it in reclaim context. We have the only
1857 * reference to the inode at this point anyways.
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001858 */
Brian Foster3b4683c2017-04-11 10:50:05 -07001859 if (xfs_can_free_eofblocks(ip, true))
Brian Fostera36b9262017-01-27 23:22:55 -08001860 xfs_free_eofblocks(ip);
Brian Foster74564fb2013-09-20 11:06:12 -04001861
1862 return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001863 }
1864
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001865 if (S_ISREG(VFS_I(ip)->i_mode) &&
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001866 (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
1867 ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
1868 truncate = 1;
1869
Darrick J. Wongc14cfcc2018-05-04 15:30:21 -07001870 error = xfs_qm_dqattach(ip);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001871 if (error)
Brian Foster74564fb2013-09-20 11:06:12 -04001872 return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001873
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001874 if (S_ISLNK(VFS_I(ip)->i_mode))
Brian Foster36b21dd2013-09-20 11:06:09 -04001875 error = xfs_inactive_symlink(ip);
Brian Fosterf7be2d72013-09-20 11:06:10 -04001876 else if (truncate)
1877 error = xfs_inactive_truncate(ip);
1878 if (error)
Brian Foster74564fb2013-09-20 11:06:12 -04001879 return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001880
1881 /*
1882 * If there are attributes associated with the file then blow them away
1883 * now. The code calls a routine that recursively deconstructs the
Dave Chinner6dfe5a02015-05-29 07:40:08 +10001884	 * attribute fork. It also blows away the in-core attribute fork.
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001885 */
Dave Chinner6dfe5a02015-05-29 07:40:08 +10001886 if (XFS_IFORK_Q(ip)) {
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001887 error = xfs_attr_inactive(ip);
1888 if (error)
Brian Foster74564fb2013-09-20 11:06:12 -04001889 return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001890 }
1891
Dave Chinner6dfe5a02015-05-29 07:40:08 +10001892 ASSERT(!ip->i_afp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001893 ASSERT(ip->i_d.di_anextents == 0);
Dave Chinner6dfe5a02015-05-29 07:40:08 +10001894 ASSERT(ip->i_d.di_forkoff == 0);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001895
1896 /*
1897 * Free the inode.
1898 */
Brian Foster88877d22013-09-20 11:06:11 -04001899 error = xfs_inactive_ifree(ip);
1900 if (error)
Brian Foster74564fb2013-09-20 11:06:12 -04001901 return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001902
1903 /*
1904 * Release the dquots held by inode, if any.
1905 */
1906 xfs_qm_dqdetach(ip);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001907}
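/*
 * Teardown order sketch (restating the flow above, not new behaviour):
 * cancel any CoW fork reservations; then either trim post-EOF blocks and
 * return (nlink != 0) or truncate the data fork; tear down the attribute
 * fork with xfs_attr_inactive(); free the inode via xfs_inactive_ifree();
 * and finally drop the attached dquots with xfs_qm_dqdetach().
 */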
1908
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909/*
Darrick J. Wong9b247172019-02-07 10:37:16 -08001910 * In-Core Unlinked List Lookups
1911 * =============================
1912 *
1913 * Every inode is supposed to be reachable from some other piece of metadata
1914 * with the exception of the root directory. Inodes with a connection to a
1915 * file descriptor but not linked from anywhere in the on-disk directory tree
1916 * are collectively known as unlinked inodes, though the filesystem itself
1917 * maintains links to these inodes so that on-disk metadata are consistent.
1918 *
1919 * XFS implements a per-AG on-disk hash table of unlinked inodes. The AGI
1920 * header contains a number of buckets that point to an inode, and each inode
1921 * record has a pointer to the next inode in the hash chain. This
1922 * singly-linked list causes scaling problems in the iunlink remove function
1923 * because we must walk that list to find the inode that points to the inode
1924 * being removed from the unlinked hash bucket list.
1925 *
1926 * What if we modelled the unlinked list as a collection of records capturing
1927 * "X.next_unlinked = Y" relations? If we indexed those records on Y, we'd
1928 * have a fast way to look up unlinked list predecessors, which avoids the
1929 * slow list walk. That's exactly what we do here (in-core) with a per-AG
1930 * rhashtable.
1931 *
1932 * Because this is a backref cache, we ignore operational failures since the
1933 * iunlink code can fall back to the slow bucket walk. The only errors that
1934 * should bubble out are for obviously incorrect situations.
1935 *
1936 * All users of the backref cache MUST hold the AGI buffer lock to serialize
1937 * access or have otherwise provided for concurrency control.
1938 */
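/*
 * Illustrative example (hypothetical inode numbers): for an unlinked
 * chain 12 -> 34 -> 56 hanging off one AGI bucket, the cache holds
 * {iu_agino = 12, iu_next_unlinked = 34} and {iu_agino = 34,
 * iu_next_unlinked = 56}. Because the table is keyed on iu_next_unlinked,
 * a lookup of 56 returns 34, the predecessor we would otherwise have to
 * find by walking the chain from the bucket head.
 */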
1939
1940/* Capture a "X.next_unlinked = Y" relationship. */
1941struct xfs_iunlink {
1942 struct rhash_head iu_rhash_head;
1943 xfs_agino_t iu_agino; /* X */
1944 xfs_agino_t iu_next_unlinked; /* Y */
1945};
1946
1947/* Unlinked list predecessor lookup hashtable construction */
1948static int
1949xfs_iunlink_obj_cmpfn(
1950 struct rhashtable_compare_arg *arg,
1951 const void *obj)
1952{
1953 const xfs_agino_t *key = arg->key;
1954 const struct xfs_iunlink *iu = obj;
1955
1956 if (iu->iu_next_unlinked != *key)
1957 return 1;
1958 return 0;
1959}
1960
1961static const struct rhashtable_params xfs_iunlink_hash_params = {
1962 .min_size = XFS_AGI_UNLINKED_BUCKETS,
1963 .key_len = sizeof(xfs_agino_t),
1964 .key_offset = offsetof(struct xfs_iunlink,
1965 iu_next_unlinked),
1966 .head_offset = offsetof(struct xfs_iunlink, iu_rhash_head),
1967 .automatic_shrinking = true,
1968 .obj_cmpfn = xfs_iunlink_obj_cmpfn,
1969};
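/*
 * Note that key_len/key_offset describe iu_next_unlinked, not iu_agino,
 * so a lookup such as (sketch)
 *
 *	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
 *			xfs_iunlink_hash_params);
 *
 * answers "which inode has @agino as its next_unlinked?", which is
 * exactly the predecessor query xfs_iunlink_lookup_backref() needs.
 */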
1970
1971/*
1972 * Return X, where X.next_unlinked == @agino. Returns NULLAGINO if no such
1973 * relation is found.
1974 */
1975static xfs_agino_t
1976xfs_iunlink_lookup_backref(
1977 struct xfs_perag *pag,
1978 xfs_agino_t agino)
1979{
1980 struct xfs_iunlink *iu;
1981
1982 iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
1983 xfs_iunlink_hash_params);
1984 return iu ? iu->iu_agino : NULLAGINO;
1985}
1986
1987/*
1988 * Take ownership of an iunlink cache entry and insert it into the hash table.
1989 * If successful, the entry will be owned by the cache; if not, it is freed.
1990 * Either way, the caller does not own @iu after this call.
1991 */
1992static int
1993xfs_iunlink_insert_backref(
1994 struct xfs_perag *pag,
1995 struct xfs_iunlink *iu)
1996{
1997 int error;
1998
1999 error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
2000 &iu->iu_rhash_head, xfs_iunlink_hash_params);
2001 /*
2002 * Fail loudly if there already was an entry because that's a sign of
2003 * corruption of in-memory data. Also fail loudly if we see an error
2004 * code we didn't anticipate from the rhashtable code. Currently we
2005 * only anticipate ENOMEM.
2006 */
2007 if (error) {
2008 WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
2009 kmem_free(iu);
2010 }
2011 /*
2012 * Absorb any runtime errors that aren't a result of corruption because
2013 * this is a cache and we can always fall back to bucket list scanning.
2014 */
2015 if (error != 0 && error != -EEXIST)
2016 error = 0;
2017 return error;
2018}
2019
2020/* Remember that @prev_agino.next_unlinked = @this_agino. */
2021static int
2022xfs_iunlink_add_backref(
2023 struct xfs_perag *pag,
2024 xfs_agino_t prev_agino,
2025 xfs_agino_t this_agino)
2026{
2027 struct xfs_iunlink *iu;
2028
2029 if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
2030 return 0;
2031
2032 iu = kmem_zalloc(sizeof(*iu), KM_SLEEP | KM_NOFS);
2033 iu->iu_agino = prev_agino;
2034 iu->iu_next_unlinked = this_agino;
2035
2036 return xfs_iunlink_insert_backref(pag, iu);
2037}
2038
2039/*
2040 * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
2041 * If @next_unlinked is NULLAGINO, we drop the backref and exit. If there
2042 * wasn't any such entry then we don't bother.
2043 */
2044static int
2045xfs_iunlink_change_backref(
2046 struct xfs_perag *pag,
2047 xfs_agino_t agino,
2048 xfs_agino_t next_unlinked)
2049{
2050 struct xfs_iunlink *iu;
2051 int error;
2052
2053 /* Look up the old entry; if there wasn't one then exit. */
2054 iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
2055 xfs_iunlink_hash_params);
2056 if (!iu)
2057 return 0;
2058
2059 /*
2060 * Remove the entry. This shouldn't ever return an error, but if we
2061 * couldn't remove the old entry we don't want to add it again to the
2062 * hash table, and if the entry disappeared on us then someone's
2063 * violated the locking rules and we need to fail loudly. Either way
2064 * we cannot remove the inode because internal state is or would have
2065 * been corrupt.
2066 */
2067 error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
2068 &iu->iu_rhash_head, xfs_iunlink_hash_params);
2069 if (error)
2070 return error;
2071
2072 /* If there is no new next entry just free our item and return. */
2073 if (next_unlinked == NULLAGINO) {
2074 kmem_free(iu);
2075 return 0;
2076 }
2077
2078 /* Update the entry and re-add it to the hash table. */
2079 iu->iu_next_unlinked = next_unlinked;
2080 return xfs_iunlink_insert_backref(pag, iu);
2081}
2082
2083/* Set up the in-core predecessor structures. */
2084int
2085xfs_iunlink_init(
2086 struct xfs_perag *pag)
2087{
2088 return rhashtable_init(&pag->pagi_unlinked_hash,
2089 &xfs_iunlink_hash_params);
2090}
2091
2092/* Free the in-core predecessor structures. */
2093static void
2094xfs_iunlink_free_item(
2095 void *ptr,
2096 void *arg)
2097{
2098 struct xfs_iunlink *iu = ptr;
2099 bool *freed_anything = arg;
2100
2101 *freed_anything = true;
2102 kmem_free(iu);
2103}
2104
2105void
2106xfs_iunlink_destroy(
2107 struct xfs_perag *pag)
2108{
2109 bool freed_anything = false;
2110
2111 rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
2112 xfs_iunlink_free_item, &freed_anything);
2113
2114 ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
2115}
2116
2117/*
Darrick J. Wong9a4a5112019-02-07 10:37:14 -08002118 * Point the AGI unlinked bucket at an inode and log the results. The caller
2119 * is responsible for validating the old value.
2120 */
2121STATIC int
2122xfs_iunlink_update_bucket(
2123 struct xfs_trans *tp,
2124 xfs_agnumber_t agno,
2125 struct xfs_buf *agibp,
2126 unsigned int bucket_index,
2127 xfs_agino_t new_agino)
2128{
2129 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
2130 xfs_agino_t old_value;
2131 int offset;
2132
2133 ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));
2134
2135 old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2136 trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
2137 old_value, new_agino);
2138
2139 /*
2140 * We should never find the head of the list already set to the value
2141 * passed in because either we're adding or removing ourselves from the
2142 * head of the list.
2143 */
2144 if (old_value == new_agino)
2145 return -EFSCORRUPTED;
2146
2147 agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
2148 offset = offsetof(struct xfs_agi, agi_unlinked) +
2149 (sizeof(xfs_agino_t) * bucket_index);
2150 xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
2151 return 0;
2152}
2153
Darrick J. Wongf2fc16a2019-02-07 10:37:15 -08002154/* Set an on-disk inode's next_unlinked pointer. */
2155STATIC void
2156xfs_iunlink_update_dinode(
2157 struct xfs_trans *tp,
2158 xfs_agnumber_t agno,
2159 xfs_agino_t agino,
2160 struct xfs_buf *ibp,
2161 struct xfs_dinode *dip,
2162 struct xfs_imap *imap,
2163 xfs_agino_t next_agino)
2164{
2165 struct xfs_mount *mp = tp->t_mountp;
2166 int offset;
2167
2168 ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2169
2170 trace_xfs_iunlink_update_dinode(mp, agno, agino,
2171 be32_to_cpu(dip->di_next_unlinked), next_agino);
2172
2173 dip->di_next_unlinked = cpu_to_be32(next_agino);
2174 offset = imap->im_boffset +
2175 offsetof(struct xfs_dinode, di_next_unlinked);
2176
2177 /* need to recalc the inode CRC if appropriate */
2178 xfs_dinode_calc_crc(mp, dip);
2179 xfs_trans_inode_buf(tp, ibp);
2180 xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
2181 xfs_inobp_check(mp, ibp);
2182}
2183
2184/* Set an in-core inode's unlinked pointer and return the old value. */
2185STATIC int
2186xfs_iunlink_update_inode(
2187 struct xfs_trans *tp,
2188 struct xfs_inode *ip,
2189 xfs_agnumber_t agno,
2190 xfs_agino_t next_agino,
2191 xfs_agino_t *old_next_agino)
2192{
2193 struct xfs_mount *mp = tp->t_mountp;
2194 struct xfs_dinode *dip;
2195 struct xfs_buf *ibp;
2196 xfs_agino_t old_value;
2197 int error;
2198
2199 ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2200
2201 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0, 0);
2202 if (error)
2203 return error;
2204
2205 /* Make sure the old pointer isn't garbage. */
2206 old_value = be32_to_cpu(dip->di_next_unlinked);
2207 if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
2208 error = -EFSCORRUPTED;
2209 goto out;
2210 }
2211
2212 /*
2213 * Since we're updating a linked list, we should never find that the
2214 * current pointer is the same as the new value, unless we're
2215 * terminating the list.
2216 */
2217 *old_next_agino = old_value;
2218 if (old_value == next_agino) {
2219 if (next_agino != NULLAGINO)
2220 error = -EFSCORRUPTED;
2221 goto out;
2222 }
2223
2224 /* Ok, update the new pointer. */
2225 xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
2226 ibp, dip, &ip->i_imap, next_agino);
2227 return 0;
2228out:
2229 xfs_trans_brelse(tp, ibp);
2230 return error;
2231}
2232
Darrick J. Wong9a4a5112019-02-07 10:37:14 -08002233/*
Darrick J. Wongc4a6bf72019-02-13 11:15:17 -08002234 * This is called when the inode's link count has gone to 0 or we are creating
2235 * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0.
Dave Chinner54d7b5c2016-02-09 16:54:58 +11002236 *
2237 * We place the on-disk inode on a list in the AGI. It will be pulled from this
2238 * list when the inode is freed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 */
Dave Chinner54d7b5c2016-02-09 16:54:58 +11002240STATIC int
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241xfs_iunlink(
Darrick J. Wong5837f622019-02-07 10:37:13 -08002242 struct xfs_trans *tp,
2243 struct xfs_inode *ip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244{
Darrick J. Wong5837f622019-02-07 10:37:13 -08002245 struct xfs_mount *mp = tp->t_mountp;
2246 struct xfs_agi *agi;
Darrick J. Wong5837f622019-02-07 10:37:13 -08002247 struct xfs_buf *agibp;
Darrick J. Wong86bfd372019-02-07 10:37:14 -08002248 xfs_agino_t next_agino;
Darrick J. Wong5837f622019-02-07 10:37:13 -08002249 xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2250 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2251 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
Darrick J. Wong5837f622019-02-07 10:37:13 -08002252 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253
Darrick J. Wongc4a6bf72019-02-13 11:15:17 -08002254 ASSERT(VFS_I(ip)->i_nlink == 0);
Dave Chinnerc19b3b052016-02-09 16:54:58 +11002255 ASSERT(VFS_I(ip)->i_mode != 0);
Darrick J. Wong4664c662019-02-07 10:37:16 -08002256 trace_xfs_iunlink(ip);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257
Darrick J. Wong5837f622019-02-07 10:37:13 -08002258 /* Get the agi buffer first. It ensures lock ordering on the list. */
2259 error = xfs_read_agi(mp, tp, agno, &agibp);
Vlad Apostolov859d7182007-10-11 17:44:18 +10002260 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 agi = XFS_BUF_TO_AGI(agibp);
Christoph Hellwig5e1be0f2008-11-28 14:23:37 +11002263
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264 /*
Darrick J. Wong86bfd372019-02-07 10:37:14 -08002265 * Get the index into the agi hash table for the list this inode will
2266 * go on. Make sure the pointer isn't garbage and that this inode
2267 * isn't already on the list.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 */
Darrick J. Wong86bfd372019-02-07 10:37:14 -08002269 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2270 if (next_agino == agino ||
2271 !xfs_verify_agino_or_null(mp, agno, next_agino))
2272 return -EFSCORRUPTED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273
Darrick J. Wong86bfd372019-02-07 10:37:14 -08002274 if (next_agino != NULLAGINO) {
Darrick J. Wong9b247172019-02-07 10:37:16 -08002275 struct xfs_perag *pag;
2276 xfs_agino_t old_agino;
Darrick J. Wongf2fc16a2019-02-07 10:37:15 -08002277
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 /*
Darrick J. Wongf2fc16a2019-02-07 10:37:15 -08002279 * There is already another inode in the bucket, so point this
2280 * inode to the current head of the list.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281 */
Darrick J. Wongf2fc16a2019-02-07 10:37:15 -08002282 error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
2283 &old_agino);
Vlad Apostolovc319b582007-11-23 16:27:51 +11002284 if (error)
2285 return error;
Darrick J. Wongf2fc16a2019-02-07 10:37:15 -08002286 ASSERT(old_agino == NULLAGINO);
Darrick J. Wong9b247172019-02-07 10:37:16 -08002287
2288 /*
2289 * agino has been unlinked, add a backref from the next inode
2290 * back to agino.
2291 */
2292 pag = xfs_perag_get(mp, agno);
2293 error = xfs_iunlink_add_backref(pag, agino, next_agino);
2294 xfs_perag_put(pag);
2295 if (error)
2296 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297 }
2298
Darrick J. Wong9a4a5112019-02-07 10:37:14 -08002299 /* Point the head of the list to point to this inode. */
2300 return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301}
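/*
 * Worked example (hypothetical aginos): if bucket B currently heads at
 * agino 34 and we unlink agino 12 into the same bucket, the steps above
 * amount to a list push:
 *
 *	12.next_unlinked = 34;		(xfs_iunlink_update_inode)
 *	backref cache += {12 -> 34};	(xfs_iunlink_add_backref)
 *	agi_unlinked[B] = 12;		(xfs_iunlink_update_bucket)
 */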
2302
Darrick J. Wong23ffa522019-02-07 10:37:15 -08002303/* Return the imap, dinode pointer, and buffer for an inode. */
2304STATIC int
2305xfs_iunlink_map_ino(
2306 struct xfs_trans *tp,
2307 xfs_agnumber_t agno,
2308 xfs_agino_t agino,
2309 struct xfs_imap *imap,
2310 struct xfs_dinode **dipp,
2311 struct xfs_buf **bpp)
2312{
2313 struct xfs_mount *mp = tp->t_mountp;
2314 int error;
2315
2316 imap->im_blkno = 0;
2317 error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
2318 if (error) {
2319 xfs_warn(mp, "%s: xfs_imap returned error %d.",
2320 __func__, error);
2321 return error;
2322 }
2323
2324 error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0, 0);
2325 if (error) {
2326 xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2327 __func__, error);
2328 return error;
2329 }
2330
2331 return 0;
2332}
2333
2334/*
2335 * Walk the unlinked chain from @head_agino until we find the inode that
2336 * points to @target_agino. Return the inode number, map, dinode pointer,
2337 * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
2338 *
2339 * @tp, @pag, @head_agino, and @target_agino are input parameters.
2340 * @agino, @imap, @dipp, and @bpp are all output parameters.
2341 *
2342 * Do not call this function if @target_agino is the head of the list.
2343 */
2344STATIC int
2345xfs_iunlink_map_prev(
2346 struct xfs_trans *tp,
2347 xfs_agnumber_t agno,
2348 xfs_agino_t head_agino,
2349 xfs_agino_t target_agino,
2350 xfs_agino_t *agino,
2351 struct xfs_imap *imap,
2352 struct xfs_dinode **dipp,
Darrick J. Wong9b247172019-02-07 10:37:16 -08002353 struct xfs_buf **bpp,
2354 struct xfs_perag *pag)
Darrick J. Wong23ffa522019-02-07 10:37:15 -08002355{
2356 struct xfs_mount *mp = tp->t_mountp;
2357 xfs_agino_t next_agino;
2358 int error;
2359
2360 ASSERT(head_agino != target_agino);
2361 *bpp = NULL;
2362
Darrick J. Wong9b247172019-02-07 10:37:16 -08002363 /* See if our backref cache can find it faster. */
2364 *agino = xfs_iunlink_lookup_backref(pag, target_agino);
2365 if (*agino != NULLAGINO) {
2366 error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp);
2367 if (error)
2368 return error;
2369
2370 if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
2371 return 0;
2372
2373 /*
2374 * If we get here the cache contents were corrupt, so drop the
2375 * buffer and fall back to walking the bucket list.
2376 */
2377 xfs_trans_brelse(tp, *bpp);
2378 *bpp = NULL;
2379 WARN_ON_ONCE(1);
2380 }
2381
2382 trace_xfs_iunlink_map_prev_fallback(mp, agno);
2383
2384 /* Otherwise, walk the entire bucket until we find it. */
Darrick J. Wong23ffa522019-02-07 10:37:15 -08002385 next_agino = head_agino;
2386 while (next_agino != target_agino) {
2387 xfs_agino_t unlinked_agino;
2388
2389 if (*bpp)
2390 xfs_trans_brelse(tp, *bpp);
2391
2392 *agino = next_agino;
2393 error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
2394 bpp);
2395 if (error)
2396 return error;
2397
2398 unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
2399 /*
2400 * Make sure this pointer is valid and isn't an obvious
2401 * infinite loop.
2402 */
2403 if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
2404 next_agino == unlinked_agino) {
2405 XFS_CORRUPTION_ERROR(__func__,
2406 XFS_ERRLEVEL_LOW, mp,
2407 *dipp, sizeof(**dipp));
2408 error = -EFSCORRUPTED;
2409 return error;
2410 }
2411 next_agino = unlinked_agino;
2412 }
2413
2414 return 0;
2415}
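/*
 * Continuing the example above (hypothetical aginos, chain 12 -> 34 ->
 * 56): removing 56 asks this function for the inode whose next_unlinked
 * is 56. The backref cache answers 34 directly and we verify it against
 * the on-disk dinode; only if the cached answer is missing or stale do
 * we fall back to reading 12, then 34, from the bucket head, an
 * O(chain length) walk.
 */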
2416
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417/*
2418 * Pull the on-disk inode from the AGI unlinked list.
2419 */
2420STATIC int
2421xfs_iunlink_remove(
Darrick J. Wong5837f622019-02-07 10:37:13 -08002422 struct xfs_trans *tp,
2423 struct xfs_inode *ip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424{
Darrick J. Wong5837f622019-02-07 10:37:13 -08002425 struct xfs_mount *mp = tp->t_mountp;
2426 struct xfs_agi *agi;
Darrick J. Wong5837f622019-02-07 10:37:13 -08002427 struct xfs_buf *agibp;
Darrick J. Wong5837f622019-02-07 10:37:13 -08002428 struct xfs_buf *last_ibp;
2429 struct xfs_dinode *last_dip = NULL;
Darrick J. Wong9b247172019-02-07 10:37:16 -08002430 struct xfs_perag *pag = NULL;
Darrick J. Wong5837f622019-02-07 10:37:13 -08002431 xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2432 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2433 xfs_agino_t next_agino;
Darrick J. Wongb1d2a062019-02-07 10:37:15 -08002434 xfs_agino_t head_agino;
Darrick J. Wong5837f622019-02-07 10:37:13 -08002435 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
Darrick J. Wong5837f622019-02-07 10:37:13 -08002436 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437
Darrick J. Wong4664c662019-02-07 10:37:16 -08002438 trace_xfs_iunlink_remove(ip);
2439
Darrick J. Wong5837f622019-02-07 10:37:13 -08002440 /* Get the agi buffer first. It ensures lock ordering on the list. */
Christoph Hellwig5e1be0f2008-11-28 14:23:37 +11002441 error = xfs_read_agi(mp, tp, agno, &agibp);
2442 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444 agi = XFS_BUF_TO_AGI(agibp);
Christoph Hellwig5e1be0f2008-11-28 14:23:37 +11002445
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 /*
Darrick J. Wong86bfd372019-02-07 10:37:14 -08002447 * Get the index into the agi hash table for the list this inode will
2448 * go on. Make sure the head pointer isn't garbage.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 */
Darrick J. Wongb1d2a062019-02-07 10:37:15 -08002450 head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2451 if (!xfs_verify_agino(mp, agno, head_agino)) {
Darrick J. Wongd2e73662018-06-04 11:27:51 -07002452 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2453 agi, sizeof(*agi));
2454 return -EFSCORRUPTED;
2455 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456
Darrick J. Wongb1d2a062019-02-07 10:37:15 -08002457 /*
2458 * Set our inode's next_unlinked pointer to NULL and then return
2459 * the old pointer value so that we can update whatever was previous
2460 * to us in the list to point to whatever was next in the list.
2461 */
2462 error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino);
2463 if (error)
2464 return error;
Darrick J. Wong9a4a5112019-02-07 10:37:14 -08002465
Darrick J. Wong9b247172019-02-07 10:37:16 -08002466 /*
2467 * If there was a backref pointing from the next inode back to this
2468 * one, remove it because we've removed this inode from the list.
2469 *
2470 * Later, if this inode was in the middle of the list we'll update
2471 * this inode's backref to point from the next inode.
2472 */
2473 if (next_agino != NULLAGINO) {
2474 pag = xfs_perag_get(mp, agno);
2475 error = xfs_iunlink_change_backref(pag, next_agino,
2476 NULLAGINO);
2477 if (error)
2478 goto out;
2479 }
2480
Darrick J. Wongb1d2a062019-02-07 10:37:15 -08002481 if (head_agino == agino) {
Darrick J. Wong9a4a5112019-02-07 10:37:14 -08002482 /* Point the head of the list to the next unlinked inode. */
2483 error = xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
2484 next_agino);
2485 if (error)
Darrick J. Wong9b247172019-02-07 10:37:16 -08002486 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487 } else {
Darrick J. Wongf2fc16a2019-02-07 10:37:15 -08002488 struct xfs_imap imap;
2489 xfs_agino_t prev_agino;
2490
Darrick J. Wong9b247172019-02-07 10:37:16 -08002491 if (!pag)
2492 pag = xfs_perag_get(mp, agno);
2493
Darrick J. Wong23ffa522019-02-07 10:37:15 -08002494 /* We need to search the list for the inode being freed. */
Darrick J. Wongb1d2a062019-02-07 10:37:15 -08002495 error = xfs_iunlink_map_prev(tp, agno, head_agino, agino,
Darrick J. Wong9b247172019-02-07 10:37:16 -08002496 &prev_agino, &imap, &last_dip, &last_ibp,
2497 pag);
Darrick J. Wong23ffa522019-02-07 10:37:15 -08002498 if (error)
Darrick J. Wong9b247172019-02-07 10:37:16 -08002499 goto out;
Christoph Hellwig475ee412012-07-03 12:21:22 -04002500
Darrick J. Wongf2fc16a2019-02-07 10:37:15 -08002501 /* Point the previous inode on the list to the next inode. */
2502 xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
2503 last_dip, &imap, next_agino);
Darrick J. Wong9b247172019-02-07 10:37:16 -08002504
2505 /*
2506 * Now we deal with the backref for this inode. If this inode
2507 * pointed at a real inode, change the backref that pointed to
2508 * us to point to our old next. If this inode was the end of
2509 * the list, delete the backref that pointed to us. Note that
2510 * change_backref takes care of deleting the backref if
2511 * next_agino is NULLAGINO.
2512 */
2513 error = xfs_iunlink_change_backref(pag, agino, next_agino);
2514 if (error)
2515 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 }
Darrick J. Wong9b247172019-02-07 10:37:16 -08002517
2518out:
2519 if (pag)
2520 xfs_perag_put(pag);
2521 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522}
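/*
 * Worked example (hypothetical aginos, chain 12 -> 34 -> 56): removing
 * the middle inode 34 proceeds as
 *
 *	34.next_unlinked = NULLAGINO;	(old value 56 is returned)
 *	drop the backref keyed on 56;	(it recorded 34 -> 56)
 *	12.next_unlinked = 56;		(xfs_iunlink_map_prev + update)
 *	rekey backref {12 -> 34} to {12 -> 56};
 *
 * leaving the on-disk chain 12 -> 56 with a cache that still answers
 * predecessor queries for it.
 */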
2523
Dave Chinner5b3eed72010-08-24 11:42:41 +10002524/*
Zhi Yong Wu0b8182d2013-08-12 03:14:59 +00002525 * A big issue when freeing the inode cluster is that we _cannot_ skip any
Dave Chinner5b3eed72010-08-24 11:42:41 +10002526 * inodes that are in memory - they all must be marked stale and attached to
2527 * the cluster buffer.
2528 */
Chandra Seetharaman2a30f36d2011-09-20 13:56:55 +00002529STATIC int
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530xfs_ifree_cluster(
Brian Foster09b56602015-05-29 09:26:03 +10002531 xfs_inode_t *free_ip,
2532 xfs_trans_t *tp,
2533 struct xfs_icluster *xic)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534{
2535 xfs_mount_t *mp = free_ip->i_mount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 int nbufs;
Dave Chinner5b257b42010-06-03 16:22:29 +10002537 int i, j;
Brian Foster3cdaa182015-06-04 13:03:34 +10002538 int ioffset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 xfs_daddr_t blkno;
2540 xfs_buf_t *bp;
Dave Chinner5b257b42010-06-03 16:22:29 +10002541 xfs_inode_t *ip;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 xfs_inode_log_item_t *iip;
Carlos Maiolino643c8c02018-01-24 13:38:49 -08002543 struct xfs_log_item *lip;
Dave Chinner5017e972010-01-11 11:47:40 +00002544 struct xfs_perag *pag;
Brian Foster09b56602015-05-29 09:26:03 +10002545 xfs_ino_t inum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546
Brian Foster09b56602015-05-29 09:26:03 +10002547 inum = xic->first_ino;
Dave Chinner5017e972010-01-11 11:47:40 +00002548 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
Darrick J. Wong83dcdb42018-12-12 08:46:25 -08002549 nbufs = mp->m_ialloc_blks / mp->m_blocks_per_cluster;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550
Darrick J. Wong83dcdb42018-12-12 08:46:25 -08002551 for (j = 0; j < nbufs; j++, inum += mp->m_inodes_per_cluster) {
Brian Foster09b56602015-05-29 09:26:03 +10002552 /*
2553 * The allocation bitmap tells us which inodes of the chunk were
2554 * physically allocated. Skip the cluster if an inode falls into
2555 * a sparse region.
2556 */
Brian Foster3cdaa182015-06-04 13:03:34 +10002557 ioffset = inum - xic->first_ino;
2558 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
Darrick J. Wong83dcdb42018-12-12 08:46:25 -08002559 ASSERT(ioffset % mp->m_inodes_per_cluster == 0);
Brian Foster09b56602015-05-29 09:26:03 +10002560 continue;
2561 }
2562
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2564 XFS_INO_TO_AGBNO(mp, inum));
2565
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566 /*
Dave Chinner5b257b42010-06-03 16:22:29 +10002567	 * We obtain and lock the backing buffer first, as we have to
2568	 * ensure that any dirty inode that we
2569 * can't get the flush lock on is attached to the buffer.
2570 * If we scan the in-memory inodes first, then buffer IO can
2571 * complete before we get a lock on it, and hence we may fail
2572 * to mark all the active inodes on the buffer stale.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573 */
Dave Chinner5b257b42010-06-03 16:22:29 +10002574 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
Darrick J. Wong83dcdb42018-12-12 08:46:25 -08002575 mp->m_bsize * mp->m_blocks_per_cluster,
Dave Chinnerb6aff292012-11-02 11:38:42 +11002576 XBF_UNMAPPED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577
Chandra Seetharaman2a30f36d2011-09-20 13:56:55 +00002578 if (!bp)
Dave Chinner24513372014-06-25 14:58:08 +10002579 return -ENOMEM;
Dave Chinnerb0f539d2012-11-14 17:53:49 +11002580
2581 /*
2582 * This buffer may not have been correctly initialised as we
2583 * didn't read it from disk. That's not important because we are
2584	 * only using it to mark the buffer as stale in the log, and to
2585	 * attach stale cached inodes on it. That means it will never be
2586	 * dispatched for IO. If it is, we want to know about it, and we
2587	 * want it to fail. We can achieve this by adding a write
2588 * verifier to the buffer.
2589 */
Colin Ian King8c4ce792018-12-12 08:46:20 -08002590 bp->b_ops = &xfs_inode_buf_ops;
Dave Chinnerb0f539d2012-11-14 17:53:49 +11002591
Dave Chinner5b257b42010-06-03 16:22:29 +10002592 /*
2593 * Walk the inodes already attached to the buffer and mark them
2594 * stale. These will all have the flush locks held, so an
Dave Chinner5b3eed72010-08-24 11:42:41 +10002595 * in-memory inode walk can't lock them. By marking them all
2596 * stale first, we will not attempt to lock them in the loop
2597 * below as the XFS_ISTALE flag will be set.
Dave Chinner5b257b42010-06-03 16:22:29 +10002598 */
Carlos Maiolino643c8c02018-01-24 13:38:49 -08002599 list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 if (lip->li_type == XFS_LI_INODE) {
2601 iip = (xfs_inode_log_item_t *)lip;
2602 ASSERT(iip->ili_logged == 1);
Christoph Hellwigca30b2a2010-06-23 18:11:15 +10002603 lip->li_cb = xfs_istale_done;
David Chinner7b2e2a32008-10-30 17:39:12 +11002604 xfs_trans_ail_copy_lsn(mp->m_ail,
2605 &iip->ili_flush_lsn,
2606 &iip->ili_item.li_lsn);
David Chinnere5ffd2b2006-11-21 18:55:33 +11002607 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609 }
2610
Dave Chinner5b3eed72010-08-24 11:42:41 +10002611
Dave Chinner5b257b42010-06-03 16:22:29 +10002612 /*
2613	 * For each inode in memory, attempt to add it to the inode
2614 * buffer and set it up for being staled on buffer IO
2615 * completion. This is safe as we've locked out tail pushing
2616 * and flushing by locking the buffer.
2617 *
2618 * We have already marked every inode that was part of a
2619 * transaction stale above, which means there is no point in
2620 * even trying to lock them.
2621 */
Darrick J. Wong83dcdb42018-12-12 08:46:25 -08002622 for (i = 0; i < mp->m_inodes_per_cluster; i++) {
Dave Chinner5b3eed72010-08-24 11:42:41 +10002623retry:
Dave Chinner1a3e8f32010-12-17 17:29:43 +11002624 rcu_read_lock();
Dave Chinner5b257b42010-06-03 16:22:29 +10002625 ip = radix_tree_lookup(&pag->pag_ici_root,
2626 XFS_INO_TO_AGINO(mp, (inum + i)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627
Dave Chinner1a3e8f32010-12-17 17:29:43 +11002628 /* Inode not in memory, nothing to do */
2629 if (!ip) {
2630 rcu_read_unlock();
Dave Chinner5b257b42010-06-03 16:22:29 +10002631 continue;
2632 }
2633
Dave Chinner5b3eed72010-08-24 11:42:41 +10002634 /*
Dave Chinner1a3e8f32010-12-17 17:29:43 +11002635	 * Because this is an RCU-protected lookup, we could
2636 * find a recently freed or even reallocated inode
2637 * during the lookup. We need to check under the
2638 * i_flags_lock for a valid inode here. Skip it if it
2639 * is not valid, the wrong inode or stale.
2640 */
2641 spin_lock(&ip->i_flags_lock);
2642 if (ip->i_ino != inum + i ||
2643 __xfs_iflags_test(ip, XFS_ISTALE)) {
2644 spin_unlock(&ip->i_flags_lock);
2645 rcu_read_unlock();
2646 continue;
2647 }
2648 spin_unlock(&ip->i_flags_lock);
2649
2650 /*
Dave Chinner5b3eed72010-08-24 11:42:41 +10002651	 * Don't try to lock/unlock the current inode, but we
2652	 * _cannot_ skip any other inode that we did not find
2653	 * attached to the buffer and that is not already
2654	 * marked stale. If we can't lock one, back off
2655	 * and retry.
2656 */
Omar Sandovalf2e9ad22017-08-25 10:05:26 -07002657 if (ip != free_ip) {
2658 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2659 rcu_read_unlock();
2660 delay(1);
2661 goto retry;
2662 }
2663
2664 /*
2665 * Check the inode number again in case we're
2666 * racing with freeing in xfs_reclaim_inode().
2667 * See the comments in that function for more
2668 * information as to why the initial check is
2669 * not sufficient.
2670 */
2671 if (ip->i_ino != inum + i) {
2672 xfs_iunlock(ip, XFS_ILOCK_EXCL);
Darrick J. Wong962cc1a2017-11-14 16:34:44 -08002673 rcu_read_unlock();
Omar Sandovalf2e9ad22017-08-25 10:05:26 -07002674 continue;
2675 }
Dave Chinner5b257b42010-06-03 16:22:29 +10002676 }
Dave Chinner1a3e8f32010-12-17 17:29:43 +11002677 rcu_read_unlock();
Dave Chinner5b257b42010-06-03 16:22:29 +10002678
Dave Chinner5b3eed72010-08-24 11:42:41 +10002679 xfs_iflock(ip);
Dave Chinner5b257b42010-06-03 16:22:29 +10002680 xfs_iflags_set(ip, XFS_ISTALE);
Dave Chinner5b257b42010-06-03 16:22:29 +10002681
Dave Chinner5b3eed72010-08-24 11:42:41 +10002682 /*
2683	 * We don't need to attach clean inodes or those with only
2684	 * unlogged changes (which we throw away anyway).
2685 */
Dave Chinner5b257b42010-06-03 16:22:29 +10002686 iip = ip->i_itemp;
Dave Chinner5b3eed72010-08-24 11:42:41 +10002687 if (!iip || xfs_inode_clean(ip)) {
Dave Chinner5b257b42010-06-03 16:22:29 +10002688 ASSERT(ip != free_ip);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689 xfs_ifunlock(ip);
2690 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2691 continue;
2692 }
2693
Christoph Hellwigf5d8d5c2012-02-29 09:53:54 +00002694 iip->ili_last_fields = iip->ili_fields;
2695 iip->ili_fields = 0;
Dave Chinnerfc0561c2015-11-03 13:14:59 +11002696 iip->ili_fsync_fields = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 iip->ili_logged = 1;
David Chinner7b2e2a32008-10-30 17:39:12 +11002698 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2699 &iip->ili_item.li_lsn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700
Christoph Hellwigca30b2a2010-06-23 18:11:15 +10002701 xfs_buf_attach_iodone(bp, xfs_istale_done,
2702 &iip->ili_item);
Dave Chinner5b257b42010-06-03 16:22:29 +10002703
2704 if (ip != free_ip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705 xfs_iunlock(ip, XFS_ILOCK_EXCL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002706 }
2707
Dave Chinner5b3eed72010-08-24 11:42:41 +10002708 xfs_trans_stale_inode_buf(tp, bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709 xfs_trans_binval(tp, bp);
2710 }
2711
Dave Chinner5017e972010-01-11 11:47:40 +00002712 xfs_perag_put(pag);
Chandra Seetharaman2a30f36d2011-09-20 13:56:55 +00002713 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714}
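
/*
 * A condensed sketch of the RCU lookup validation idiom used in the
 * stale-marking loop above; "agino" and "expected_ino" are illustrative
 * names only, not variables defined in this file:
 *
 *	rcu_read_lock();
 *	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 *	if (ip) {
 *		spin_lock(&ip->i_flags_lock);
 *		if (ip->i_ino != expected_ino ||
 *		    __xfs_iflags_test(ip, XFS_ISTALE))
 *			ip = NULL;
 *		spin_unlock(&ip->i_flags_lock);
 *	}
 *	rcu_read_unlock();
 *
 * Even after this check a caller must re-verify i_ino once it holds the
 * ILOCK, as the lookup can still race with freeing in
 * xfs_reclaim_inode().
 */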
2715
2716/*
Darrick J. Wong98c4f782017-11-22 12:21:07 -08002717 * Free any local-format buffers sitting around before we reset to
2718 * extents format.
2719 */
2720static inline void
2721xfs_ifree_local_data(
2722 struct xfs_inode *ip,
2723 int whichfork)
2724{
2725 struct xfs_ifork *ifp;
2726
2727 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
2728 return;
2729
2730 ifp = XFS_IFORK_PTR(ip, whichfork);
2731 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
2732}
2733
2734/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735 * This is called to return an inode to the inode free list.
2736 * The inode should already be truncated to 0 length and have
2737 * no pages associated with it. This routine also assumes that
2738 * the inode is already a part of the transaction.
2739 *
2740 * The on-disk copy of the inode will have been added to the list
2741 * of unlinked inodes in the AGI. We need to remove the inode from
2742 * that list atomically with respect to freeing it here.
2743 */
2744int
2745xfs_ifree(
Brian Foster0e0417f2018-07-11 22:26:07 -07002746 struct xfs_trans *tp,
2747 struct xfs_inode *ip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748{
2749 int error;
Brian Foster09b56602015-05-29 09:26:03 +10002750 struct xfs_icluster xic = { 0 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751
Christoph Hellwig579aa9c2008-04-22 17:34:00 +10002752 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
Dave Chinner54d7b5c2016-02-09 16:54:58 +11002753 ASSERT(VFS_I(ip)->i_nlink == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754 ASSERT(ip->i_d.di_nextents == 0);
2755 ASSERT(ip->i_d.di_anextents == 0);
Dave Chinnerc19b3b052016-02-09 16:54:58 +11002756 ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 ASSERT(ip->i_d.di_nblocks == 0);
2758
2759 /*
2760 * Pull the on-disk inode from the AGI unlinked list.
2761 */
2762 error = xfs_iunlink_remove(tp, ip);
Dave Chinner1baaed82013-06-27 16:04:50 +10002763 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765
Brian Foster0e0417f2018-07-11 22:26:07 -07002766 error = xfs_difree(tp, ip->i_ino, &xic);
Dave Chinner1baaed82013-06-27 16:04:50 +10002767 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 return error;
Dave Chinner1baaed82013-06-27 16:04:50 +10002769
Darrick J. Wong98c4f782017-11-22 12:21:07 -08002770 xfs_ifree_local_data(ip, XFS_DATA_FORK);
2771 xfs_ifree_local_data(ip, XFS_ATTR_FORK);
2772
Dave Chinnerc19b3b052016-02-09 16:54:58 +11002773 VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 ip->i_d.di_flags = 0;
Darrick J. Wongbeaae8c2018-01-22 19:19:26 -08002775 ip->i_d.di_flags2 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776 ip->i_d.di_dmevmask = 0;
2777 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2779 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
Eric Sandeendc1baa72018-03-28 17:48:08 -07002780
2781 /* Don't attempt to replay owner changes for a deleted inode */
2782 ip->i_itemp->ili_fields &= ~(XFS_ILOG_AOWNER|XFS_ILOG_DOWNER);
2783
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 /*
2785 * Bump the generation count so no one will be confused
2786 * by reincarnations of this inode.
2787 */
Dave Chinner9e9a2672016-02-09 16:54:58 +11002788 VFS_I(ip)->i_generation++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2790
Brian Foster09b56602015-05-29 09:26:03 +10002791 if (xic.deleted)
2792 error = xfs_ifree_cluster(ip, tp, &xic);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793
Chandra Seetharaman2a30f36d2011-09-20 13:56:55 +00002794 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795}
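
/*
 * A minimal sketch of how xfs_ifree() is typically driven from inode
 * inactivation, modeled on xfs_inactive_ifree(); the transaction
 * reservation and error handling are simplified here:
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_ifree(tp, ip);
 *	if (error)
 *		xfs_trans_cancel(tp);
 *	else
 *		error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */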
2796
2797/*
Christoph Hellwig60ec6782010-02-17 19:43:56 +00002798 * This is called to unpin an inode. The caller must have the inode locked
2799 * in at least shared mode so that the buffer cannot be subsequently pinned
2800 * once someone is waiting for it to be unpinned.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 */
Christoph Hellwig60ec6782010-02-17 19:43:56 +00002802static void
Christoph Hellwigf392e632011-12-18 20:00:10 +00002803xfs_iunpin(
Christoph Hellwig60ec6782010-02-17 19:43:56 +00002804 struct xfs_inode *ip)
David Chinnera3f74ff2008-03-06 13:43:42 +11002805{
Christoph Hellwig579aa9c2008-04-22 17:34:00 +10002806 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
David Chinnera3f74ff2008-03-06 13:43:42 +11002807
Dave Chinner4aaf15d2010-03-08 11:24:07 +11002808 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2809
David Chinnera3f74ff2008-03-06 13:43:42 +11002810 /* Give the log a push to start the unpinning I/O */
Christoph Hellwig656de4f2018-03-13 23:15:28 -07002811 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);
Christoph Hellwiga14a3482010-01-19 09:56:46 +00002812
David Chinnera3f74ff2008-03-06 13:43:42 +11002813}
2814
Christoph Hellwigf392e632011-12-18 20:00:10 +00002815static void
2816__xfs_iunpin_wait(
2817 struct xfs_inode *ip)
2818{
2819 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2820 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2821
2822 xfs_iunpin(ip);
2823
2824 do {
Ingo Molnar21417132017-03-05 11:25:39 +01002825 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
Christoph Hellwigf392e632011-12-18 20:00:10 +00002826 if (xfs_ipincount(ip))
2827 io_schedule();
2828 } while (xfs_ipincount(ip));
Ingo Molnar21417132017-03-05 11:25:39 +01002829 finish_wait(wq, &wait.wq_entry);
Christoph Hellwigf392e632011-12-18 20:00:10 +00002830}
2831
Dave Chinner777df5a2010-02-06 12:37:26 +11002832void
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833xfs_iunpin_wait(
Christoph Hellwig60ec6782010-02-17 19:43:56 +00002834 struct xfs_inode *ip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835{
Christoph Hellwigf392e632011-12-18 20:00:10 +00002836 if (xfs_ipincount(ip))
2837 __xfs_iunpin_wait(ip);
David Chinnera3f74ff2008-03-06 13:43:42 +11002838}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839
Dave Chinner27320362013-10-29 22:11:44 +11002840/*
2841 * Removing an inode from the namespace involves removing the directory entry
2842 * and dropping the link count on the inode. Removing the directory entry can
2843 * result in locking an AGF (directory blocks were freed) and removing a link
2844 * count can result in placing the inode on an unlinked list which results in
2845 * locking an AGI.
2846 *
2847 * The big problem here is that we have an ordering constraint on AGF and AGI
2848 * locking - inode allocation locks the AGI, then can allocate a new extent for
2849 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2850 * removes the inode from the unlinked list, requiring that we lock the AGI
2851 * first, and then freeing the inode can result in an inode chunk being freed
2852 * and hence freeing disk space requiring that we lock an AGF.
2853 *
2854 * Hence the ordering that is imposed by other parts of the code is AGI before
2855 * AGF. This means we cannot remove the directory entry before we drop the inode
2856 * reference count and put it on the unlinked list as this results in a lock
2857 * order of AGF then AGI, and this can deadlock against inode allocation and
2858 * freeing. Therefore we must drop the link counts before we remove the
2859 * directory entry.
2860 *
2861 * This is still safe from a transactional point of view - it is not until we
Darrick J. Wong310a75a2016-08-03 11:18:10 +10002862 * get to xfs_defer_finish() that we have the possibility of multiple
Dave Chinner27320362013-10-29 22:11:44 +11002863 * transactions in this operation. Hence as long as we remove the directory
2864 * entry and drop the link count in the first transaction of the remove
2865 * operation, there are no transactional constraints on the ordering here.
2866 */
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002867int
2868xfs_remove(
2869 xfs_inode_t *dp,
2870 struct xfs_name *name,
2871 xfs_inode_t *ip)
2872{
2873 xfs_mount_t *mp = dp->i_mount;
2874 xfs_trans_t *tp = NULL;
Dave Chinnerc19b3b052016-02-09 16:54:58 +11002875 int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002876 int error = 0;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002877 uint resblks;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002878
2879 trace_xfs_remove(dp, name);
2880
2881 if (XFS_FORCED_SHUTDOWN(mp))
Dave Chinner24513372014-06-25 14:58:08 +10002882 return -EIO;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002883
Darrick J. Wongc14cfcc2018-05-04 15:30:21 -07002884 error = xfs_qm_dqattach(dp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002885 if (error)
2886 goto std_return;
2887
Darrick J. Wongc14cfcc2018-05-04 15:30:21 -07002888 error = xfs_qm_dqattach(ip);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002889 if (error)
2890 goto std_return;
2891
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002892 /*
2893	 * We try to get the real space reservation first,
2894	 * allowing for directory btree deletion(s) implying
2895	 * possible bmap insert(s). If we can't get the space
2896	 * reservation then we use 0 instead, and avoid the bmap
2897	 * btree insert(s) in the directory code by trimming the
2898	 * LAST block from the directory if a bmap insert would
2899	 * otherwise be required.
2900 */
2901 resblks = XFS_REMOVE_SPACE_RES(mp);
Christoph Hellwig253f4912016-04-06 09:19:55 +10002902 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
Dave Chinner24513372014-06-25 14:58:08 +10002903 if (error == -ENOSPC) {
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002904 resblks = 0;
Christoph Hellwig253f4912016-04-06 09:19:55 +10002905 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2906 &tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002907 }
2908 if (error) {
Dave Chinner24513372014-06-25 14:58:08 +10002909 ASSERT(error != -ENOSPC);
Christoph Hellwig253f4912016-04-06 09:19:55 +10002910 goto std_return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002911 }
2912
Darrick J. Wong7c2d2382018-01-26 15:27:33 -08002913 xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002914
Christoph Hellwig65523212016-11-30 14:33:25 +11002915 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002916 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2917
2918 /*
2919 * If we're removing a directory perform some additional validation.
2920 */
2921 if (is_dir) {
Dave Chinner54d7b5c2016-02-09 16:54:58 +11002922 ASSERT(VFS_I(ip)->i_nlink >= 2);
2923 if (VFS_I(ip)->i_nlink != 2) {
Dave Chinner24513372014-06-25 14:58:08 +10002924 error = -ENOTEMPTY;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002925 goto out_trans_cancel;
2926 }
2927 if (!xfs_dir_isempty(ip)) {
Dave Chinner24513372014-06-25 14:58:08 +10002928 error = -ENOTEMPTY;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002929 goto out_trans_cancel;
2930 }
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002931
Dave Chinner27320362013-10-29 22:11:44 +11002932 /* Drop the link from ip's "..". */
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002933 error = xfs_droplink(tp, dp);
2934 if (error)
Dave Chinner27320362013-10-29 22:11:44 +11002935 goto out_trans_cancel;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002936
Dave Chinner27320362013-10-29 22:11:44 +11002937 /* Drop the "." link from ip to self. */
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002938 error = xfs_droplink(tp, ip);
2939 if (error)
Dave Chinner27320362013-10-29 22:11:44 +11002940 goto out_trans_cancel;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002941 } else {
2942 /*
2943 * When removing a non-directory we need to log the parent
2944 * inode here. For a directory this is done implicitly
2945 * by the xfs_droplink call for the ".." entry.
2946 */
2947 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2948 }
Dave Chinner27320362013-10-29 22:11:44 +11002949 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002950
Dave Chinner27320362013-10-29 22:11:44 +11002951 /* Drop the link from dp to ip. */
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002952 error = xfs_droplink(tp, ip);
2953 if (error)
Dave Chinner27320362013-10-29 22:11:44 +11002954 goto out_trans_cancel;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002955
Brian Foster381eee62018-07-11 22:26:21 -07002956 error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
Dave Chinner27320362013-10-29 22:11:44 +11002957 if (error) {
Dave Chinner24513372014-06-25 14:58:08 +10002958 ASSERT(error != -ENOENT);
Brian Fosterc8eac492018-07-24 13:43:13 -07002959 goto out_trans_cancel;
Dave Chinner27320362013-10-29 22:11:44 +11002960 }
2961
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002962 /*
2963 * If this is a synchronous mount, make sure that the
2964 * remove transaction goes to disk before returning to
2965 * the user.
2966 */
2967 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2968 xfs_trans_set_sync(tp);
2969
Christoph Hellwig70393312015-06-04 13:48:08 +10002970 error = xfs_trans_commit(tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002971 if (error)
2972 goto std_return;
2973
Christoph Hellwig2cd2ef62014-04-23 07:11:51 +10002974 if (is_dir && xfs_inode_is_filestream(ip))
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002975 xfs_filestream_deassociate(ip);
2976
2977 return 0;
2978
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002979 out_trans_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10002980 xfs_trans_cancel(tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002981 std_return:
2982 return error;
2983}
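
/*
 * Schematic view of the AGI-before-AGF ordering implemented by
 * xfs_remove() above (not literal code, error handling omitted):
 *
 *	error = xfs_droplink(tp, ip);
 *		(may lock the AGI via the unlinked list)
 *	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
 *		(may lock an AGF if directory blocks are freed)
 *
 * Reversing these two steps could take an AGF before the AGI and
 * deadlock against inode allocation, which locks the AGI first and
 * then the AGF.
 */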
2984
Dave Chinnerf6bba202013-08-12 20:49:46 +10002985/*
2986 * Enter all inodes for a rename transaction into a sorted array.
2987 */
Dave Chinner95afcf52015-03-25 14:03:32 +11002988#define __XFS_SORT_INODES 5
Dave Chinnerf6bba202013-08-12 20:49:46 +10002989STATIC void
2990xfs_sort_for_rename(
Dave Chinner95afcf52015-03-25 14:03:32 +11002991 struct xfs_inode *dp1, /* in: old (source) directory inode */
2992 struct xfs_inode *dp2, /* in: new (target) directory inode */
2993 struct xfs_inode *ip1, /* in: inode of old entry */
2994 struct xfs_inode *ip2, /* in: inode of new entry */
2995 struct xfs_inode *wip, /* in: whiteout inode */
2996 struct xfs_inode **i_tab,/* out: sorted array of inodes */
2997 int *num_inodes) /* in/out: inodes in array */
Dave Chinnerf6bba202013-08-12 20:49:46 +10002998{
Dave Chinnerf6bba202013-08-12 20:49:46 +10002999 int i, j;
3000
Dave Chinner95afcf52015-03-25 14:03:32 +11003001 ASSERT(*num_inodes == __XFS_SORT_INODES);
3002 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
3003
Dave Chinnerf6bba202013-08-12 20:49:46 +10003004 /*
3005 * i_tab contains a list of pointers to inodes. We initialize
3006	 * the table here and sort it. We will then use it to
3007 * order the acquisition of the inode locks.
3008 *
3009	 * Note that the table may contain duplicates; e.g., dp1 == dp2.
3010 */
Dave Chinner95afcf52015-03-25 14:03:32 +11003011 i = 0;
3012 i_tab[i++] = dp1;
3013 i_tab[i++] = dp2;
3014 i_tab[i++] = ip1;
3015 if (ip2)
3016 i_tab[i++] = ip2;
3017 if (wip)
3018 i_tab[i++] = wip;
3019 *num_inodes = i;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003020
3021 /*
3022 * Sort the elements via bubble sort. (Remember, there are at
Dave Chinner95afcf52015-03-25 14:03:32 +11003023 * most 5 elements to sort, so this is adequate.)
Dave Chinnerf6bba202013-08-12 20:49:46 +10003024 */
3025 for (i = 0; i < *num_inodes; i++) {
3026 for (j = 1; j < *num_inodes; j++) {
3027 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
Dave Chinner95afcf52015-03-25 14:03:32 +11003028 struct xfs_inode *temp = i_tab[j];
Dave Chinnerf6bba202013-08-12 20:49:46 +10003029 i_tab[j] = i_tab[j-1];
3030 i_tab[j-1] = temp;
3031 }
3032 }
3033 }
3034}
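
/*
 * Usage sketch, mirroring xfs_rename(): sort the candidate inodes
 * first, then take all of the ILOCKs in ascending inode number order
 * so that concurrent renames cannot deadlock against each other:
 *
 *	struct xfs_inode	*inodes[__XFS_SORT_INODES];
 *	int			num_inodes = __XFS_SORT_INODES;
 *
 *	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
 *			    inodes, &num_inodes);
 *	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
 */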
3035
Dave Chinner310606b2015-03-25 14:06:07 +11003036static int
3037xfs_finish_rename(
Brian Fosterc9cfdb32018-07-11 22:26:08 -07003038 struct xfs_trans *tp)
Dave Chinner310606b2015-03-25 14:06:07 +11003039{
Dave Chinner310606b2015-03-25 14:06:07 +11003040 /*
3041 * If this is a synchronous mount, make sure that the rename transaction
3042 * goes to disk before returning to the user.
3043 */
3044 if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
3045 xfs_trans_set_sync(tp);
3046
Christoph Hellwig70393312015-06-04 13:48:08 +10003047 return xfs_trans_commit(tp);
Dave Chinner310606b2015-03-25 14:06:07 +11003048}
3049
Dave Chinnerf6bba202013-08-12 20:49:46 +10003050/*
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003051 * xfs_cross_rename()
3052 *
3053	 * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() system call.
3054 */
3055STATIC int
3056xfs_cross_rename(
3057 struct xfs_trans *tp,
3058 struct xfs_inode *dp1,
3059 struct xfs_name *name1,
3060 struct xfs_inode *ip1,
3061 struct xfs_inode *dp2,
3062 struct xfs_name *name2,
3063 struct xfs_inode *ip2,
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003064 int spaceres)
3065{
3066 int error = 0;
3067 int ip1_flags = 0;
3068 int ip2_flags = 0;
3069 int dp2_flags = 0;
3070
3071 /* Swap inode number for dirent in first parent */
Brian Foster381eee62018-07-11 22:26:21 -07003072 error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003073 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11003074 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003075
3076 /* Swap inode number for dirent in second parent */
Brian Foster381eee62018-07-11 22:26:21 -07003077 error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003078 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11003079 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003080
3081 /*
3082 * If we're renaming one or more directories across different parents,
3083 * update the respective ".." entries (and link counts) to match the new
3084 * parents.
3085 */
3086 if (dp1 != dp2) {
3087 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3088
Dave Chinnerc19b3b052016-02-09 16:54:58 +11003089 if (S_ISDIR(VFS_I(ip2)->i_mode)) {
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003090 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
Brian Foster381eee62018-07-11 22:26:21 -07003091 dp1->i_ino, spaceres);
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003092 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11003093 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003094
3095 /* transfer ip2 ".." reference to dp1 */
Dave Chinnerc19b3b052016-02-09 16:54:58 +11003096 if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003097 error = xfs_droplink(tp, dp2);
3098 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11003099 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003100 error = xfs_bumplink(tp, dp1);
3101 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11003102 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003103 }
3104
3105 /*
3106	 * Although ip1 isn't changed here, userspace needs
3107	 * to be notified of the change, so that applications
3108	 * relying on it (like backup tools) will properly
3109	 * pick up the change.
3110 */
3111 ip1_flags |= XFS_ICHGTIME_CHG;
3112 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3113 }
3114
Dave Chinnerc19b3b052016-02-09 16:54:58 +11003115 if (S_ISDIR(VFS_I(ip1)->i_mode)) {
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003116 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
Brian Foster381eee62018-07-11 22:26:21 -07003117 dp2->i_ino, spaceres);
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003118 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11003119 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003120
3121 /* transfer ip1 ".." reference to dp2 */
Dave Chinnerc19b3b052016-02-09 16:54:58 +11003122 if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003123 error = xfs_droplink(tp, dp1);
3124 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11003125 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003126 error = xfs_bumplink(tp, dp2);
3127 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11003128 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003129 }
3130
3131 /*
3132	 * Although ip2 isn't changed here, userspace needs
3133	 * to be notified of the change, so that applications
3134	 * relying on it (like backup tools) will properly
3135	 * pick up the change.
3136 */
3137 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3138 ip2_flags |= XFS_ICHGTIME_CHG;
3139 }
3140 }
3141
3142 if (ip1_flags) {
3143 xfs_trans_ichgtime(tp, ip1, ip1_flags);
3144 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
3145 }
3146 if (ip2_flags) {
3147 xfs_trans_ichgtime(tp, ip2, ip2_flags);
3148 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
3149 }
3150 if (dp2_flags) {
3151 xfs_trans_ichgtime(tp, dp2, dp2_flags);
3152 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
3153 }
3154 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3155 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
Brian Fosterc9cfdb32018-07-11 22:26:08 -07003156 return xfs_finish_rename(tp);
Dave Chinnereeacd322015-03-25 14:08:07 +11003157
3158out_trans_abort:
Christoph Hellwig4906e212015-06-04 13:47:56 +10003159 xfs_trans_cancel(tp);
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003160 return error;
3161}
3162
3163/*
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003164 * xfs_rename_alloc_whiteout()
3165 *
3166	 * Return a referenced, unlinked, unlocked inode that can be used as a
3167	 * whiteout in a rename transaction. We use a tmpfile inode here so that
3168	 * if we crash between allocating the inode and linking it into the
3169	 * rename transaction, recovery will free the inode and we won't leak it.
3170 */
3171static int
3172xfs_rename_alloc_whiteout(
3173 struct xfs_inode *dp,
3174 struct xfs_inode **wip)
3175{
3176 struct xfs_inode *tmpfile;
3177 int error;
3178
Eric Sandeena1f69412018-04-06 10:09:42 -07003179 error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile);
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003180 if (error)
3181 return error;
3182
Brian Foster22419ac2015-05-29 08:14:55 +10003183 /*
3184 * Prepare the tmpfile inode as if it were created through the VFS.
Darrick J. Wongc4a6bf72019-02-13 11:15:17 -08003185 * Complete the inode setup and flag it as linkable. nlink is already
3186 * zero, so we can skip the drop_nlink.
Brian Foster22419ac2015-05-29 08:14:55 +10003187 */
Christoph Hellwig2b3d1d42016-04-06 07:48:27 +10003188 xfs_setup_iops(tmpfile);
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003189 xfs_finish_inode_setup(tmpfile);
3190 VFS_I(tmpfile)->i_state |= I_LINKABLE;
3191
3192 *wip = tmpfile;
3193 return 0;
3194}
3195
3196/*
Dave Chinnerf6bba202013-08-12 20:49:46 +10003197 * xfs_rename
3198 */
3199int
3200xfs_rename(
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003201 struct xfs_inode *src_dp,
3202 struct xfs_name *src_name,
3203 struct xfs_inode *src_ip,
3204 struct xfs_inode *target_dp,
3205 struct xfs_name *target_name,
3206 struct xfs_inode *target_ip,
3207 unsigned int flags)
Dave Chinnerf6bba202013-08-12 20:49:46 +10003208{
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003209 struct xfs_mount *mp = src_dp->i_mount;
3210 struct xfs_trans *tp;
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003211 struct xfs_inode *wip = NULL; /* whiteout inode */
3212 struct xfs_inode *inodes[__XFS_SORT_INODES];
3213 int num_inodes = __XFS_SORT_INODES;
Dave Chinner2b936812015-03-25 15:12:30 +11003214 bool new_parent = (src_dp != target_dp);
Dave Chinnerc19b3b052016-02-09 16:54:58 +11003215 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003216 int spaceres;
3217 int error;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003218
3219 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3220
Dave Chinnereeacd322015-03-25 14:08:07 +11003221 if ((flags & RENAME_EXCHANGE) && !target_ip)
3222 return -EINVAL;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003223
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003224 /*
3225 * If we are doing a whiteout operation, allocate the whiteout inode
3226 * we will be placing at the target and ensure the type is set
3227 * appropriately.
3228 */
3229 if (flags & RENAME_WHITEOUT) {
3230 ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
3231 error = xfs_rename_alloc_whiteout(target_dp, &wip);
3232 if (error)
3233 return error;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003234
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003235 /* setup target dirent info as whiteout */
3236 src_name->type = XFS_DIR3_FT_CHRDEV;
3237 }
3238
3239 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
Dave Chinnerf6bba202013-08-12 20:49:46 +10003240 inodes, &num_inodes);
3241
Dave Chinnerf6bba202013-08-12 20:49:46 +10003242 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
Christoph Hellwig253f4912016-04-06 09:19:55 +10003243 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
Dave Chinner24513372014-06-25 14:58:08 +10003244 if (error == -ENOSPC) {
Dave Chinnerf6bba202013-08-12 20:49:46 +10003245 spaceres = 0;
Christoph Hellwig253f4912016-04-06 09:19:55 +10003246 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3247 &tp);
Dave Chinnerf6bba202013-08-12 20:49:46 +10003248 }
Dave Chinner445883e2015-03-25 14:05:43 +11003249 if (error)
Christoph Hellwig253f4912016-04-06 09:19:55 +10003250 goto out_release_wip;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003251
3252 /*
3253 * Attach the dquots to the inodes
3254 */
3255 error = xfs_qm_vop_rename_dqattach(inodes);
Dave Chinner445883e2015-03-25 14:05:43 +11003256 if (error)
3257 goto out_trans_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003258
3259 /*
3260 * Lock all the participating inodes. Depending upon whether
3261 * the target_name exists in the target directory, and
3262 * whether the target directory is the same as the source
3263 * directory, we can lock from 2 to 4 inodes.
3264 */
3265 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3266
3267 /*
3268 * Join all the inodes to the transaction. From this point on,
3269 * we can rely on either trans_commit or trans_cancel to unlock
3270 * them.
3271 */
Christoph Hellwig65523212016-11-30 14:33:25 +11003272 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
Dave Chinnerf6bba202013-08-12 20:49:46 +10003273 if (new_parent)
Christoph Hellwig65523212016-11-30 14:33:25 +11003274 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
Dave Chinnerf6bba202013-08-12 20:49:46 +10003275 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3276 if (target_ip)
3277 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003278 if (wip)
3279 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
Dave Chinnerf6bba202013-08-12 20:49:46 +10003280
3281 /*
3282 * If we are using project inheritance, we only allow renames
3283 * into our tree when the project IDs are the same; else the
3284 * tree quota mechanism would be circumvented.
3285 */
3286 if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
3287 (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
Dave Chinner24513372014-06-25 14:58:08 +10003288 error = -EXDEV;
Dave Chinner445883e2015-03-25 14:05:43 +11003289 goto out_trans_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003290 }
3291
Dave Chinnereeacd322015-03-25 14:08:07 +11003292 /* RENAME_EXCHANGE is unique from here on. */
3293 if (flags & RENAME_EXCHANGE)
3294 return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3295 target_dp, target_name, target_ip,
Brian Fosterf16dea52018-07-11 22:26:20 -07003296 spaceres);
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003297
3298 /*
Dave Chinnerf6bba202013-08-12 20:49:46 +10003299 * Set up the target.
3300 */
3301 if (target_ip == NULL) {
3302 /*
3303 * If there's no space reservation, check the entry will
3304 * fit before actually inserting it.
3305 */
Eric Sandeen94f3cad2014-09-09 11:57:52 +10003306 if (!spaceres) {
3307 error = xfs_dir_canenter(tp, target_dp, target_name);
3308 if (error)
Dave Chinner445883e2015-03-25 14:05:43 +11003309 goto out_trans_cancel;
Eric Sandeen94f3cad2014-09-09 11:57:52 +10003310 }
Dave Chinnerf6bba202013-08-12 20:49:46 +10003311 /*
3312 * If target does not exist and the rename crosses
3313 * directories, adjust the target directory link count
3314 * to account for the ".." reference from the new entry.
3315 */
3316 error = xfs_dir_createname(tp, target_dp, target_name,
Brian Foster381eee62018-07-11 22:26:21 -07003317 src_ip->i_ino, spaceres);
Dave Chinnerf6bba202013-08-12 20:49:46 +10003318 if (error)
Brian Fosterc8eac492018-07-24 13:43:13 -07003319 goto out_trans_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003320
3321 xfs_trans_ichgtime(tp, target_dp,
3322 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3323
3324 if (new_parent && src_is_directory) {
3325 error = xfs_bumplink(tp, target_dp);
3326 if (error)
Brian Fosterc8eac492018-07-24 13:43:13 -07003327 goto out_trans_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003328 }
3329 } else { /* target_ip != NULL */
3330 /*
3331 * If target exists and it's a directory, check that both
3332 * target and source are directories and that target can be
3333 * destroyed, or that neither is a directory.
3334 */
Dave Chinnerc19b3b052016-02-09 16:54:58 +11003335 if (S_ISDIR(VFS_I(target_ip)->i_mode)) {
Dave Chinnerf6bba202013-08-12 20:49:46 +10003336 /*
3337 * Make sure target dir is empty.
3338 */
3339 if (!(xfs_dir_isempty(target_ip)) ||
Dave Chinner54d7b5c2016-02-09 16:54:58 +11003340 (VFS_I(target_ip)->i_nlink > 2)) {
Dave Chinner24513372014-06-25 14:58:08 +10003341 error = -EEXIST;
Dave Chinner445883e2015-03-25 14:05:43 +11003342 goto out_trans_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003343 }
3344 }
3345
3346 /*
3347 * Link the source inode under the target name.
3348 * If the source inode is a directory and we are moving
3349 * it across directories, its ".." entry will be
3350 * inconsistent until we replace that down below.
3351 *
3352 * In case there is already an entry with the same
3353 * name at the destination directory, remove it first.
3354 */
3355 error = xfs_dir_replace(tp, target_dp, target_name,
Brian Foster381eee62018-07-11 22:26:21 -07003356 src_ip->i_ino, spaceres);
Dave Chinnerf6bba202013-08-12 20:49:46 +10003357 if (error)
Brian Fosterc8eac492018-07-24 13:43:13 -07003358 goto out_trans_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003359
3360 xfs_trans_ichgtime(tp, target_dp,
3361 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3362
3363 /*
3364 * Decrement the link count on the target since the target
3365 * dir no longer points to it.
3366 */
3367 error = xfs_droplink(tp, target_ip);
3368 if (error)
Brian Fosterc8eac492018-07-24 13:43:13 -07003369 goto out_trans_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003370
3371 if (src_is_directory) {
3372 /*
3373 * Drop the link from the old "." entry.
3374 */
3375 error = xfs_droplink(tp, target_ip);
3376 if (error)
Brian Fosterc8eac492018-07-24 13:43:13 -07003377 goto out_trans_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003378 }
3379 } /* target_ip != NULL */
3380
3381 /*
3382 * Remove the source.
3383 */
3384 if (new_parent && src_is_directory) {
3385 /*
3386 * Rewrite the ".." entry to point to the new
3387 * directory.
3388 */
3389 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
Brian Foster381eee62018-07-11 22:26:21 -07003390 target_dp->i_ino, spaceres);
Dave Chinner24513372014-06-25 14:58:08 +10003391 ASSERT(error != -EEXIST);
Dave Chinnerf6bba202013-08-12 20:49:46 +10003392 if (error)
Brian Fosterc8eac492018-07-24 13:43:13 -07003393 goto out_trans_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003394 }
3395
3396 /*
3397 * We always want to hit the ctime on the source inode.
3398 *
3399 * This isn't strictly required by the standards since the source
3400 * inode isn't really being changed, but old unix file systems did
3401 * it and some incremental backup programs won't work without it.
3402 */
3403 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3404 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3405
3406 /*
3407 * Adjust the link count on src_dp. This is necessary when
3408 * renaming a directory, either within one parent when
3409 * the target existed, or across two parent directories.
3410 */
3411 if (src_is_directory && (new_parent || target_ip != NULL)) {
3412
3413 /*
3414 * Decrement link count on src_directory since the
3415 * entry that's moved no longer points to it.
3416 */
3417 error = xfs_droplink(tp, src_dp);
3418 if (error)
Brian Fosterc8eac492018-07-24 13:43:13 -07003419 goto out_trans_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003420 }
3421
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003422 /*
3423 * For whiteouts, we only need to update the source dirent with the
3424 * inode number of the whiteout inode rather than removing it
3425 * altogether.
3426 */
3427 if (wip) {
3428 error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
Brian Foster381eee62018-07-11 22:26:21 -07003429 spaceres);
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003430 } else
3431 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
Brian Foster381eee62018-07-11 22:26:21 -07003432 spaceres);
Dave Chinnerf6bba202013-08-12 20:49:46 +10003433 if (error)
Brian Fosterc8eac492018-07-24 13:43:13 -07003434 goto out_trans_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003435
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003436 /*
3437 * For whiteouts, we need to bump the link count on the whiteout inode.
3438 * This means that failures all the way up to this point leave the inode
3439 * on the unlinked list and so cleanup is a simple matter of dropping
3440 * the remaining reference to it. If we fail here after bumping the link
3441 * count, we're shutting down the filesystem so we'll never see the
3442 * intermediate state on disk.
3443 */
3444 if (wip) {
Dave Chinner54d7b5c2016-02-09 16:54:58 +11003445 ASSERT(VFS_I(wip)->i_nlink == 0);
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003446 error = xfs_bumplink(tp, wip);
3447 if (error)
Brian Fosterc8eac492018-07-24 13:43:13 -07003448 goto out_trans_cancel;
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003449 error = xfs_iunlink_remove(tp, wip);
3450 if (error)
Brian Fosterc8eac492018-07-24 13:43:13 -07003451 goto out_trans_cancel;
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003452 xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
3453
3454 /*
3455 * Now we have a real link, clear the "I'm a tmpfile" state
3456 * flag from the inode so it doesn't accidentally get misused in
3457 * future.
3458 */
3459 VFS_I(wip)->i_state &= ~I_LINKABLE;
3460 }
Dave Chinnerf6bba202013-08-12 20:49:46 +10003461
3462 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3463 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3464 if (new_parent)
3465 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3466
Brian Fosterc9cfdb32018-07-11 22:26:08 -07003467 error = xfs_finish_rename(tp);
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003468 if (wip)
Darrick J. Wong44a87362018-07-25 12:52:32 -07003469 xfs_irele(wip);
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003470 return error;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003471
Dave Chinner445883e2015-03-25 14:05:43 +11003472out_trans_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10003473 xfs_trans_cancel(tp);
Christoph Hellwig253f4912016-04-06 09:19:55 +10003474out_release_wip:
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003475 if (wip)
Darrick J. Wong44a87362018-07-25 12:52:32 -07003476 xfs_irele(wip);
Dave Chinnerf6bba202013-08-12 20:49:46 +10003477 return error;
3478}
3479
David Chinnerbad55842008-03-06 13:43:49 +11003480STATIC int
3481xfs_iflush_cluster(
Dave Chinner19429362016-05-18 14:09:46 +10003482 struct xfs_inode *ip,
3483 struct xfs_buf *bp)
David Chinnerbad55842008-03-06 13:43:49 +11003484{
Dave Chinner19429362016-05-18 14:09:46 +10003485 struct xfs_mount *mp = ip->i_mount;
Dave Chinner5017e972010-01-11 11:47:40 +00003486 struct xfs_perag *pag;
David Chinnerbad55842008-03-06 13:43:49 +11003487 unsigned long first_index, mask;
David Chinnerc8f5f122008-05-20 11:30:15 +10003488 unsigned long inodes_per_cluster;
Dave Chinner19429362016-05-18 14:09:46 +10003489 int cilist_size;
3490 struct xfs_inode **cilist;
3491 struct xfs_inode *cip;
David Chinnerbad55842008-03-06 13:43:49 +11003492 int nr_found;
3493 int clcount = 0;
David Chinnerbad55842008-03-06 13:43:49 +11003494 int i;
3495
Dave Chinner5017e972010-01-11 11:47:40 +00003496 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
David Chinnerbad55842008-03-06 13:43:49 +11003497
Jie Liu0f49efd2013-12-13 15:51:48 +11003498 inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
Dave Chinner19429362016-05-18 14:09:46 +10003499 cilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
3500 cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS);
3501 if (!cilist)
Dave Chinner44b56e02010-01-11 11:47:43 +00003502 goto out_put;
David Chinnerbad55842008-03-06 13:43:49 +11003503
Jie Liu0f49efd2013-12-13 15:51:48 +11003504 mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
David Chinnerbad55842008-03-06 13:43:49 +11003505 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
Dave Chinner1a3e8f32010-12-17 17:29:43 +11003506 rcu_read_lock();
David Chinnerbad55842008-03-06 13:43:49 +11003507 /* really need a gang lookup range call here */
Dave Chinner19429362016-05-18 14:09:46 +10003508 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist,
David Chinnerc8f5f122008-05-20 11:30:15 +10003509 first_index, inodes_per_cluster);
David Chinnerbad55842008-03-06 13:43:49 +11003510 if (nr_found == 0)
3511 goto out_free;
3512
3513 for (i = 0; i < nr_found; i++) {
Dave Chinner19429362016-05-18 14:09:46 +10003514 cip = cilist[i];
3515 if (cip == ip)
David Chinnerbad55842008-03-06 13:43:49 +11003516 continue;
Dave Chinner1a3e8f32010-12-17 17:29:43 +11003517
3518 /*
3519	 * Because this is an RCU-protected lookup, we could find a
3520 * recently freed or even reallocated inode during the lookup.
3521 * We need to check under the i_flags_lock for a valid inode
3522 * here. Skip it if it is not valid or the wrong inode.
3523 */
Dave Chinner19429362016-05-18 14:09:46 +10003524 spin_lock(&cip->i_flags_lock);
3525 if (!cip->i_ino ||
3526 __xfs_iflags_test(cip, XFS_ISTALE)) {
3527 spin_unlock(&cip->i_flags_lock);
Dave Chinner1a3e8f32010-12-17 17:29:43 +11003528 continue;
3529 }
Dave Chinner5a90e532016-05-18 14:09:13 +10003530
3531 /*
3532 * Once we fall off the end of the cluster, no point checking
3533 * any more inodes in the list because they will also all be
3534 * outside the cluster.
3535 */
Dave Chinner19429362016-05-18 14:09:46 +10003536 if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) {
3537 spin_unlock(&cip->i_flags_lock);
Dave Chinner5a90e532016-05-18 14:09:13 +10003538 break;
3539 }
Dave Chinner19429362016-05-18 14:09:46 +10003540 spin_unlock(&cip->i_flags_lock);
Dave Chinner1a3e8f32010-12-17 17:29:43 +11003541
David Chinnerbad55842008-03-06 13:43:49 +11003542 /*
3543 * Do an un-protected check to see if the inode is dirty and
3544 * is a candidate for flushing. These checks will be repeated
3545 * later after the appropriate locks are acquired.
3546 */
Dave Chinner19429362016-05-18 14:09:46 +10003547 if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0)
David Chinnerbad55842008-03-06 13:43:49 +11003548 continue;
David Chinnerbad55842008-03-06 13:43:49 +11003549
3550 /*
3551 * Try to get locks. If any are unavailable or it is pinned,
3552 * then this inode cannot be flushed and is skipped.
3553 */
3554
Dave Chinner19429362016-05-18 14:09:46 +10003555 if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED))
David Chinnerbad55842008-03-06 13:43:49 +11003556 continue;
Dave Chinner19429362016-05-18 14:09:46 +10003557 if (!xfs_iflock_nowait(cip)) {
3558 xfs_iunlock(cip, XFS_ILOCK_SHARED);
David Chinnerbad55842008-03-06 13:43:49 +11003559 continue;
3560 }
Dave Chinner19429362016-05-18 14:09:46 +10003561 if (xfs_ipincount(cip)) {
3562 xfs_ifunlock(cip);
3563 xfs_iunlock(cip, XFS_ILOCK_SHARED);
David Chinnerbad55842008-03-06 13:43:49 +11003564 continue;
3565 }
3566
Dave Chinner8a17d7d2016-05-18 14:09:12 +10003567
3568 /*
3569 * Check the inode number again, just to be certain we are not
3570 * racing with freeing in xfs_reclaim_inode(). See the comments
3571 * in that function for more information as to why the initial
3572 * check is not sufficient.
3573 */
Dave Chinner19429362016-05-18 14:09:46 +10003574 if (!cip->i_ino) {
3575 xfs_ifunlock(cip);
3576 xfs_iunlock(cip, XFS_ILOCK_SHARED);
David Chinnerbad55842008-03-06 13:43:49 +11003577 continue;
3578 }
3579
3580 /*
3581	 * Arriving here means that this inode can be flushed. First
3582 * re-check that it's dirty before flushing.
3583 */
Dave Chinner19429362016-05-18 14:09:46 +10003584 if (!xfs_inode_clean(cip)) {
David Chinner33540402008-03-06 13:43:59 +11003585 int error;
Dave Chinner19429362016-05-18 14:09:46 +10003586 error = xfs_iflush_int(cip, bp);
David Chinnerbad55842008-03-06 13:43:49 +11003587 if (error) {
Dave Chinner19429362016-05-18 14:09:46 +10003588 xfs_iunlock(cip, XFS_ILOCK_SHARED);
David Chinnerbad55842008-03-06 13:43:49 +11003589 goto cluster_corrupt_out;
3590 }
3591 clcount++;
3592 } else {
Dave Chinner19429362016-05-18 14:09:46 +10003593 xfs_ifunlock(cip);
David Chinnerbad55842008-03-06 13:43:49 +11003594 }
Dave Chinner19429362016-05-18 14:09:46 +10003595 xfs_iunlock(cip, XFS_ILOCK_SHARED);
David Chinnerbad55842008-03-06 13:43:49 +11003596 }
3597
3598 if (clcount) {
Bill O'Donnellff6d6af2015-10-12 18:21:22 +11003599 XFS_STATS_INC(mp, xs_icluster_flushcnt);
3600 XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
David Chinnerbad55842008-03-06 13:43:49 +11003601 }
3602
3603out_free:
Dave Chinner1a3e8f32010-12-17 17:29:43 +11003604 rcu_read_unlock();
Dave Chinner19429362016-05-18 14:09:46 +10003605 kmem_free(cilist);
Dave Chinner44b56e02010-01-11 11:47:43 +00003606out_put:
3607 xfs_perag_put(pag);
David Chinnerbad55842008-03-06 13:43:49 +11003608 return 0;
3609
3610
3611cluster_corrupt_out:
3612 /*
3613 * Corruption detected in the clustering loop. Invalidate the
3614 * inode buffer and shut down the filesystem.
3615 */
Dave Chinner1a3e8f32010-12-17 17:29:43 +11003616 rcu_read_unlock();
David Chinnerbad55842008-03-06 13:43:49 +11003617 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3618
David Chinnerbad55842008-03-06 13:43:49 +11003619 /*
Dave Chinnere53946d2018-06-21 23:26:05 -07003620	 * We'll always have an inode attached to the buffer for completion
3621	 * processing by the time we are called from xfs_iflush(). Hence we
3622	 * always need to do IO completion processing to abort the inodes
3623	 * attached to the buffer. Handle them just like the shutdown case in
3624	 * xfs_buf_submit().
David Chinnerbad55842008-03-06 13:43:49 +11003625 */
Dave Chinnere53946d2018-06-21 23:26:05 -07003626 ASSERT(bp->b_iodone);
3627 bp->b_flags &= ~XBF_DONE;
3628 xfs_buf_stale(bp);
3629 xfs_buf_ioerror(bp, -EIO);
3630 xfs_buf_ioend(bp);
3631
3632 /* abort the corrupt inode, as it was not attached to the buffer */
Dave Chinner19429362016-05-18 14:09:46 +10003633 xfs_iflush_abort(cip, false);
3634 kmem_free(cilist);
Dave Chinner44b56e02010-01-11 11:47:43 +00003635 xfs_perag_put(pag);
Dave Chinner24513372014-06-25 14:58:08 +10003636 return -EFSCORRUPTED;
David Chinnerbad55842008-03-06 13:43:49 +11003637}
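
/*
 * Worked example for the cluster mask arithmetic in
 * xfs_iflush_cluster() above, assuming a hypothetical 8k inode cluster
 * of 512 byte inodes (sb_inodelog == 9):
 *
 *	inodes_per_cluster = 8192 >> 9 = 16
 *	mask = ~(16 - 1)
 *	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask
 *
 * first_index is thus the flushed inode's AG inode number rounded down
 * to the start of its cluster, and the gang lookup scans at most 16
 * in-memory inodes from that point.
 */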
3638
Linus Torvalds1da177e2005-04-16 15:20:36 -07003639/*
Christoph Hellwig4c468192012-04-23 15:58:36 +10003640 * Flush dirty inode metadata into the backing buffer.
3641 *
3642 * The caller must have the inode lock and the inode flush lock held. The
3643 * inode lock will still be held upon return to the caller, and the inode
3644 * flush lock will be released after the inode has reached the disk.
3645 *
3646 * The caller must write out the buffer returned in *bpp and release it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003647 */
3648int
3649xfs_iflush(
Christoph Hellwig4c468192012-04-23 15:58:36 +10003650 struct xfs_inode *ip,
3651 struct xfs_buf **bpp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003652{
Christoph Hellwig4c468192012-04-23 15:58:36 +10003653 struct xfs_mount *mp = ip->i_mount;
Dave Chinnerb1438f42016-05-18 13:53:42 +10003654 struct xfs_buf *bp = NULL;
Christoph Hellwig4c468192012-04-23 15:58:36 +10003655 struct xfs_dinode *dip;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003656 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003657
Bill O'Donnellff6d6af2015-10-12 18:21:22 +11003658 XFS_STATS_INC(mp, xs_iflush_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003659
Christoph Hellwig579aa9c2008-04-22 17:34:00 +10003660 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
Christoph Hellwig474fce02011-12-18 20:00:09 +00003661 ASSERT(xfs_isiflocked(ip));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003662 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
Christoph Hellwig8096b1e2011-12-18 20:00:07 +00003663 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664
Christoph Hellwig4c468192012-04-23 15:58:36 +10003665 *bpp = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666
Linus Torvalds1da177e2005-04-16 15:20:36 -07003667 xfs_iunpin_wait(ip);
3668
3669 /*
Dave Chinner4b6a4682010-01-11 11:45:21 +00003670 * For stale inodes we cannot rely on the backing buffer remaining
3671 * stale in cache for the remaining life of the stale inode and so
Christoph Hellwig475ee412012-07-03 12:21:22 -04003672 * xfs_imap_to_bp() below may give us a buffer that no longer contains
Dave Chinner4b6a4682010-01-11 11:45:21 +00003673 * inodes below. We have to check this after ensuring the inode is
3674 * unpinned so that it is safe to reclaim the stale inode after the
3675 * flush call.
3676 */
3677 if (xfs_iflags_test(ip, XFS_ISTALE)) {
3678 xfs_ifunlock(ip);
3679 return 0;
3680 }
3681
3682 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683 * This may have been unpinned because the filesystem is shutting
3684 * down forcibly. If that's the case we must not write this inode
Christoph Hellwig32ce90a2012-04-23 15:58:32 +10003685 * to disk, because the log record didn't make it to disk.
3686 *
3687 * We also have to remove the log item from the AIL in this case,
3688 * as we wait for an empty AIL as part of the unmount process.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003689 */
3690 if (XFS_FORCED_SHUTDOWN(mp)) {
Dave Chinner24513372014-06-25 14:58:08 +10003691 error = -EIO;
Christoph Hellwig32ce90a2012-04-23 15:58:32 +10003692 goto abort_out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693 }
3694
3695 /*
Dave Chinnerb1438f42016-05-18 13:53:42 +10003696 * Get the buffer containing the on-disk inode. We are doing a try-lock
3697 * operation here, so we may get an EAGAIN error. In that case, we
3698 * simply want to return with the inode still dirty.
3699 *
3700 * If we get any other error, we effectively have a corruption situation
3701 * and we cannot flush the inode, so we treat it the same as failing
3702 * xfs_iflush_int().
David Chinnera3f74ff2008-03-06 13:43:42 +11003703 */
Christoph Hellwig475ee412012-07-03 12:21:22 -04003704 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
3705 0);
Dave Chinnerb1438f42016-05-18 13:53:42 +10003706 if (error == -EAGAIN) {
David Chinnera3f74ff2008-03-06 13:43:42 +11003707 xfs_ifunlock(ip);
3708 return error;
3709 }
Dave Chinnerb1438f42016-05-18 13:53:42 +10003710 if (error)
3711 goto corrupt_out;
David Chinnera3f74ff2008-03-06 13:43:42 +11003712
3713 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003714 * First flush out the inode that xfs_iflush was called with.
3715 */
3716 error = xfs_iflush_int(ip, bp);
David Chinnerbad55842008-03-06 13:43:49 +11003717 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003718 goto corrupt_out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003719
3720 /*
David Chinnera3f74ff2008-03-06 13:43:42 +11003721 * If the buffer is pinned then push on the log now so we won't
3722 * get stuck waiting in the write for too long.
3723 */
Chandra Seetharaman811e64c2011-07-22 23:40:27 +00003724 if (xfs_buf_ispinned(bp))
Christoph Hellwiga14a3482010-01-19 09:56:46 +00003725 xfs_log_force(mp, 0);
David Chinnera3f74ff2008-03-06 13:43:42 +11003726
3727 /*
Dave Chinnere53946d2018-06-21 23:26:05 -07003728 * Inode clustering: try to gather other inodes into this write
3729 *
3730 * Note: Any error during clustering will result in the filesystem
3731 * being shut down and completion callbacks run on the cluster buffer.
3732 * As we have already flushed and attached this inode to the buffer,
3733 * it has already been aborted and released by xfs_iflush_cluster() and
3734 * so we have no further error handling to do here.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735 */
David Chinnerbad55842008-03-06 13:43:49 +11003736 error = xfs_iflush_cluster(ip, bp);
3737 if (error)
Dave Chinnere53946d2018-06-21 23:26:05 -07003738 return error;
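	/*
	 * Sketch of the error-ownership rule described in the comment
	 * above (all names invented): before the item is attached to the
	 * buffer the caller owns the abort; afterwards the helper owns
	 * all cleanup on failure, so the caller must only propagate the
	 * error and never abort a second time.
	 */
	struct demo_item;
	int demo_attach(struct demo_item *it);	/* caller cleans up on failure */
	int demo_helper(struct demo_item *it);	/* helper cleans up on failure */
	void demo_abort(struct demo_item *it);

	static int demo_flush(struct demo_item *it)
	{
		int error;

		error = demo_attach(it);
		if (error)
			goto out_abort;		/* we still own the item here */

		error = demo_helper(it);
		if (error)
			return error;		/* helper already aborted it */

		return 0;

	out_abort:
		demo_abort(it);
		return error;
	}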
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739
Christoph Hellwig4c468192012-04-23 15:58:36 +10003740 *bpp = bp;
3741 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003742
3743corrupt_out:
Dave Chinnerb1438f42016-05-18 13:53:42 +10003744 if (bp)
3745 xfs_buf_relse(bp);
Nathan Scott7d04a332006-06-09 14:58:38 +10003746 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
Christoph Hellwig32ce90a2012-04-23 15:58:32 +10003747abort_out:
Dave Chinnere53946d2018-06-21 23:26:05 -07003748 /* abort the corrupt inode, as it was not attached to the buffer */
Dave Chinner04913fd2012-04-23 15:58:41 +10003749 xfs_iflush_abort(ip, false);
Christoph Hellwig32ce90a2012-04-23 15:58:32 +10003750 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751}
3752
Darrick J. Wong9cfb9b42018-01-08 10:51:06 -08003753/*
3754 * If there are inline format data / attr forks attached to this inode,
3755 * make sure they're not corrupt.
3756 */
3757bool
3758xfs_inode_verify_forks(
3759 struct xfs_inode *ip)
3760{
Darrick J. Wong22431bf2018-01-22 18:09:48 -08003761 struct xfs_ifork *ifp;
Darrick J. Wong9cfb9b42018-01-08 10:51:06 -08003762 xfs_failaddr_t fa;
3763
3764 fa = xfs_ifork_verify_data(ip, &xfs_default_ifork_ops);
3765 if (fa) {
Darrick J. Wong22431bf2018-01-22 18:09:48 -08003766 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
3767 xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork",
3768 ifp->if_u1.if_data, ifp->if_bytes, fa);
Darrick J. Wong9cfb9b42018-01-08 10:51:06 -08003769 return false;
3770 }
3771
3772 fa = xfs_ifork_verify_attr(ip, &xfs_default_ifork_ops);
3773 if (fa) {
Darrick J. Wong22431bf2018-01-22 18:09:48 -08003774 ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
3775 xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork",
3776 ifp ? ifp->if_u1.if_data : NULL,
3777 ifp ? ifp->if_bytes : 0, fa);
Darrick J. Wong9cfb9b42018-01-08 10:51:06 -08003778 return false;
3779 }
3780 return true;
3781}
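/*
 * Hedged usage sketch: writeback paths gate on the verifier exactly as
 * xfs_iflush_int() does below, treating a false return as on-disk
 * corruption.  demo_write_gate() is invented for illustration.
 */
static int demo_write_gate(struct xfs_inode *ip)
{
	if (!xfs_inode_verify_forks(ip))
		return -EFSCORRUPTED;	/* refuse to write a bad fork */
	return 0;
}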
3782
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783STATIC int
3784xfs_iflush_int(
Christoph Hellwig93848a92013-04-03 16:11:17 +11003785 struct xfs_inode *ip,
3786 struct xfs_buf *bp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787{
Christoph Hellwig93848a92013-04-03 16:11:17 +11003788 struct xfs_inode_log_item *iip = ip->i_itemp;
3789 struct xfs_dinode *dip;
3790 struct xfs_mount *mp = ip->i_mount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791
Christoph Hellwig579aa9c2008-04-22 17:34:00 +10003792 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
Christoph Hellwig474fce02011-12-18 20:00:09 +00003793 ASSERT(xfs_isiflocked(ip));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003794 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
Christoph Hellwig8096b1e2011-12-18 20:00:07 +00003795 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
Christoph Hellwig93848a92013-04-03 16:11:17 +11003796 ASSERT(iip != NULL && iip->ili_fields != 0);
Dave Chinner263997a2014-05-20 07:46:40 +10003797 ASSERT(ip->i_d.di_version > 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003798
Linus Torvalds1da177e2005-04-16 15:20:36 -07003799	/* set dip to point at the inode's location within the buffer */
Christoph Hellwig88ee2df2015-06-22 09:44:29 +10003800 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801
Christoph Hellwig69ef9212011-07-08 14:36:05 +02003802 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
Darrick J. Wong9e24cfd2017-06-20 17:54:47 -07003803 mp, XFS_ERRTAG_IFLUSH_1)) {
Dave Chinner6a19d932011-03-07 10:02:35 +11003804 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
Darrick J. Wongc9690042018-01-09 12:02:55 -08003805 "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
Dave Chinner6a19d932011-03-07 10:02:35 +11003806 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003807 goto corrupt_out;
3808 }
Dave Chinnerc19b3b052016-02-09 16:54:58 +11003809 if (S_ISREG(VFS_I(ip)->i_mode)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003810 if (XFS_TEST_ERROR(
3811 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3812 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
Darrick J. Wong9e24cfd2017-06-20 17:54:47 -07003813 mp, XFS_ERRTAG_IFLUSH_3)) {
Dave Chinner6a19d932011-03-07 10:02:35 +11003814 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
Darrick J. Wongc9690042018-01-09 12:02:55 -08003815 "%s: Bad regular inode %Lu, ptr "PTR_FMT,
Dave Chinner6a19d932011-03-07 10:02:35 +11003816 __func__, ip->i_ino, ip);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003817 goto corrupt_out;
3818 }
Dave Chinnerc19b3b052016-02-09 16:54:58 +11003819 } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820 if (XFS_TEST_ERROR(
3821 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3822 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3823 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
Darrick J. Wong9e24cfd2017-06-20 17:54:47 -07003824 mp, XFS_ERRTAG_IFLUSH_4)) {
Dave Chinner6a19d932011-03-07 10:02:35 +11003825 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
Darrick J. Wongc9690042018-01-09 12:02:55 -08003826 "%s: Bad directory inode %Lu, ptr "PTR_FMT,
Dave Chinner6a19d932011-03-07 10:02:35 +11003827 __func__, ip->i_ino, ip);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828 goto corrupt_out;
3829 }
3830 }
3831 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
Darrick J. Wong9e24cfd2017-06-20 17:54:47 -07003832 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
Dave Chinner6a19d932011-03-07 10:02:35 +11003833 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3834 "%s: detected corrupt incore inode %Lu, "
Darrick J. Wongc9690042018-01-09 12:02:55 -08003835 "total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
Dave Chinner6a19d932011-03-07 10:02:35 +11003836 __func__, ip->i_ino,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003837 ip->i_d.di_nextents + ip->i_d.di_anextents,
Dave Chinner6a19d932011-03-07 10:02:35 +11003838 ip->i_d.di_nblocks, ip);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839 goto corrupt_out;
3840 }
3841 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
Darrick J. Wong9e24cfd2017-06-20 17:54:47 -07003842 mp, XFS_ERRTAG_IFLUSH_6)) {
Dave Chinner6a19d932011-03-07 10:02:35 +11003843 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
Darrick J. Wongc9690042018-01-09 12:02:55 -08003844 "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
Dave Chinner6a19d932011-03-07 10:02:35 +11003845 __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003846 goto corrupt_out;
3847 }
Dave Chinnere60896d2013-07-24 15:47:30 +10003848
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849 /*
Dave Chinner263997a2014-05-20 07:46:40 +10003850 * Inode item log recovery for v2 inodes is dependent on the
Dave Chinnere60896d2013-07-24 15:47:30 +10003851 * di_flushiter count for correct sequencing. We bump the flush
3852 * iteration count so we can detect flushes which postdate a log record
3853 * during recovery. This is redundant as we now log every change and
3854 * hence this can't happen but we need to still do it to ensure
3855 * hence this can't happen, but we still need to do it to ensure
3856 * inode changes.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003857 */
Dave Chinnere60896d2013-07-24 15:47:30 +10003858 if (ip->i_d.di_version < 3)
3859 ip->i_d.di_flushiter++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003860
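	/*
	 * Userspace model of the recovery-ordering rule this counter
	 * exists for (a sketch only; the real check lives in log
	 * recovery, not here): a logged inode is replayed only if the
	 * on-disk copy has not been flushed more recently, with an
	 * allowance for the counter wrapping at the maximum value.
	 */
	#include <stdbool.h>
	#include <stdint.h>

	#define DEMO_MAX_FLUSH	0xffffU

	static bool demo_should_replay(uint16_t log_iter, uint16_t disk_iter)
	{
		/* A disk counter at the limit may have wrapped in the log. */
		if (disk_iter == DEMO_MAX_FLUSH && log_iter == 0)
			return true;
		return log_iter >= disk_iter;
	}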
Darrick J. Wong9cfb9b42018-01-08 10:51:06 -08003861	/* Check the inline fork data before we write it out. */
3862 if (!xfs_inode_verify_forks(ip))
Darrick J. Wong005c5db2017-03-28 14:51:10 -07003863 goto corrupt_out;
3864
Linus Torvalds1da177e2005-04-16 15:20:36 -07003865 /*
Dave Chinner39878482016-02-09 16:54:58 +11003866 * Copy the dirty parts of the inode into the on-disk inode. We always
3867 * copy out the core of the inode, because if the inode is dirty at all
3868 * the core must be.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003869 */
Dave Chinner93f958f2016-02-09 16:54:58 +11003870 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003871
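	/*
	 * Toy copy-out in the spirit of the call above (invented
	 * structures, htonl() standing in for cpu_to_be32()): the whole
	 * core is converted and written even when only one field is
	 * dirty, because dirtiness is tracked for the core as a unit and
	 * every on-disk field is big-endian.
	 */
	#include <arpa/inet.h>
	#include <stdint.h>

	struct demo_incore { uint32_t size; uint32_t nblocks; };
	struct demo_ondisk { uint32_t size; uint32_t nblocks; };

	static void demo_to_disk(const struct demo_incore *ic,
				 struct demo_ondisk *od)
	{
		od->size    = htonl(ic->size);		/* always both fields, */
		od->nblocks = htonl(ic->nblocks);	/* never a partial copy */
	}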
3872	/* Wrap: we never let the log put out DI_MAX_FLUSH */
3873 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3874 ip->i_d.di_flushiter = 0;
3875
Darrick J. Wong005c5db2017-03-28 14:51:10 -07003876 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3877 if (XFS_IFORK_Q(ip))
3878 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879 xfs_inobp_check(mp, bp);
3880
3881 /*
Christoph Hellwigf5d8d5c2012-02-29 09:53:54 +00003882 * We've recorded everything logged in the inode, so we'd like to clear
3883 * the ili_fields bits so we don't log and flush things unnecessarily.
3884 * However, we can't stop logging all this information until the data
3885 * we've copied into the disk buffer is written to disk. If we did we
3886 * might overwrite the copy of the inode in the log with all the data
3887 * after re-logging only part of it, and in the face of a crash we
3888 * wouldn't have all the data we need to recover.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003889 *
Christoph Hellwigf5d8d5c2012-02-29 09:53:54 +00003890 * What we do is move the bits to the ili_last_fields field. When
3891 * logging the inode, these bits are moved back to the ili_fields field.
3892 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
3893 * know that the information those bits represent is permanently on
3894 * disk. As long as the flush completes before the inode is logged
3895 * again, then both ili_fields and ili_last_fields will be cleared.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896 *
Christoph Hellwigf5d8d5c2012-02-29 09:53:54 +00003897 * We can play with the ili_fields bits here, because the inode lock
3898 * must be held exclusively in order to set bits there and the flush
3899 * lock protects the ili_last_fields bits. Set ili_logged so the flush
3900 * done routine can tell whether or not to look in the AIL. Also, store
3901 * the current LSN of the inode so that we can tell whether the item has
3902 * moved in the AIL from xfs_iflush_done(). In order to read the lsn we
3903 * need the AIL lock, because it is a 64 bit value that cannot be read
3904 * atomically.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003905 */
Christoph Hellwig93848a92013-04-03 16:11:17 +11003906 iip->ili_last_fields = iip->ili_fields;
3907 iip->ili_fields = 0;
Dave Chinnerfc0561c2015-11-03 13:14:59 +11003908 iip->ili_fsync_fields = 0;
Christoph Hellwig93848a92013-04-03 16:11:17 +11003909 iip->ili_logged = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910
Christoph Hellwig93848a92013-04-03 16:11:17 +11003911 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3912 &iip->ili_item.li_lsn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003913
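	/*
	 * Minimal model of the ili_fields/ili_last_fields handoff the
	 * comment above describes (userspace sketch, invented names):
	 * bits move from "dirty" to "in flight" when a flush starts and
	 * are dropped only when the write completes; relogging while a
	 * flush is in flight merges those bits back into the dirty set.
	 */
	#include <stdint.h>

	struct demo_log_item {
		uint32_t	dirty;		/* like ili_fields */
		uint32_t	in_flight;	/* like ili_last_fields */
	};

	static void demo_flush_start(struct demo_log_item *li)
	{
		li->in_flight = li->dirty;
		li->dirty = 0;
	}

	static void demo_relog(struct demo_log_item *li, uint32_t bits)
	{
		/* An in-flight flush must not let these bits be forgotten. */
		li->dirty |= bits | li->in_flight;
	}

	static void demo_flush_done(struct demo_log_item *li)
	{
		li->in_flight = 0;	/* data is on disk; safe to drop */
	}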
Christoph Hellwig93848a92013-04-03 16:11:17 +11003914 /*
3915 * Attach the function xfs_iflush_done to the inode's
3916 * buffer. This will remove the inode from the AIL
3917 * and unlock the inode's flush lock when the inode is
3918 * completely written to disk.
3919 */
3920 xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003921
Christoph Hellwig93848a92013-04-03 16:11:17 +11003922 /* generate the checksum. */
3923 xfs_dinode_calc_crc(mp, dip);
3924
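	/*
	 * Minimal model of "checksum last" (a sketch; toy_sum() stands
	 * in for the CRC32c the kernel actually uses): every field,
	 * including the LSN stamped above, must be final before the
	 * checksum is computed, so sealing is the last step before I/O.
	 */
	#include <stddef.h>
	#include <stdint.h>

	struct demo_rec {
		uint32_t	crc;		/* covers the record with crc zeroed */
		uint8_t		payload[60];
	};

	static uint32_t toy_sum(const uint8_t *p, size_t len)
	{
		uint32_t s = 0;

		while (len--)
			s = s * 31 + *p++;
		return s;
	}

	static void demo_seal(struct demo_rec *r)
	{
		r->crc = 0;
		r->crc = toy_sum((const uint8_t *)r, sizeof(*r));
	}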
Carlos Maiolino643c8c02018-01-24 13:38:49 -08003925 ASSERT(!list_empty(&bp->b_li_list));
Christoph Hellwig93848a92013-04-03 16:11:17 +11003926 ASSERT(bp->b_iodone != NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003927 return 0;
3928
3929corrupt_out:
Dave Chinner24513372014-06-25 14:58:08 +10003930 return -EFSCORRUPTED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931}
Darrick J. Wong44a87362018-07-25 12:52:32 -07003932
3933/* Release an inode. */
3934void
3935xfs_irele(
3936 struct xfs_inode *ip)
3937{
3938 trace_xfs_irele(ip, _RET_IP_);
3939 iput(VFS_I(ip));
3940}
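/*
 * Hedged usage sketch: each reference obtained through xfs_iget() is
 * dropped through xfs_irele().  demo_use_inode() is invented for
 * illustration; the xfs_iget() call matches its declaration in
 * xfs_icache.h.
 */
static int demo_use_inode(struct xfs_mount *mp, xfs_ino_t ino)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		return error;

	/* ... operate on the inode ... */

	xfs_irele(ip);		/* drops the VFS reference */
	return 0;
}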