// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"
#include "xfs_ag.h"

#include <linux/iversion.h>

/* Radix tree tags for incore inode tree. */

/* inode is to be reclaimed */
#define XFS_ICI_RECLAIM_TAG     0
/* Inode has speculative preallocations (posteof or cow) to clean. */
#define XFS_ICI_BLOCKGC_TAG     1

/*
 * The goal for walking incore inodes. These can correspond with incore inode
 * radix tree tags when convenient. Avoid existing XFS_IWALK namespace.
 */
enum xfs_icwalk_goal {
        /* Goals directly associated with tagged inodes. */
        XFS_ICWALK_BLOCKGC      = XFS_ICI_BLOCKGC_TAG,
        XFS_ICWALK_RECLAIM      = XFS_ICI_RECLAIM_TAG,
};

static int xfs_icwalk(struct xfs_mount *mp,
                enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
static int xfs_icwalk_ag(struct xfs_perag *pag,
                enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);

/*
 * Private inode cache walk flags for struct xfs_icwalk.  Must not
 * coincide with XFS_ICWALK_FLAGS_VALID.
 */

/* Stop scanning after icw_scan_limit inodes. */
#define XFS_ICWALK_FLAG_SCAN_LIMIT      (1U << 28)

#define XFS_ICWALK_FLAG_RECLAIM_SICK    (1U << 27)
#define XFS_ICWALK_FLAG_UNION           (1U << 26) /* union filter algorithm */

#define XFS_ICWALK_PRIVATE_FLAGS        (XFS_ICWALK_FLAG_SCAN_LIMIT | \
                                         XFS_ICWALK_FLAG_RECLAIM_SICK | \
                                         XFS_ICWALK_FLAG_UNION)

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
        struct xfs_mount        *mp,
        xfs_ino_t               ino)
{
        struct xfs_inode        *ip;

        /*
         * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
         * and return NULL here on ENOMEM.
         */
        ip = kmem_cache_alloc(xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);

        if (inode_init_always(mp->m_super, VFS_I(ip))) {
                kmem_cache_free(xfs_inode_cache, ip);
                return NULL;
        }

        /* VFS doesn't initialise i_mode or i_state! */
        VFS_I(ip)->i_mode = 0;
        VFS_I(ip)->i_state = 0;
        mapping_set_large_folios(VFS_I(ip)->i_mapping);

        XFS_STATS_INC(mp, vn_active);
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(ip->i_ino == 0);

        /* initialise the xfs inode */
        ip->i_ino = ino;
        ip->i_mount = mp;
        memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
        ip->i_afp = NULL;
        ip->i_cowfp = NULL;
        memset(&ip->i_df, 0, sizeof(ip->i_df));
        ip->i_flags = 0;
        ip->i_delayed_blks = 0;
        ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
        ip->i_nblocks = 0;
        ip->i_forkoff = 0;
        ip->i_sick = 0;
        ip->i_checked = 0;
        INIT_WORK(&ip->i_ioend_work, xfs_end_io);
        INIT_LIST_HEAD(&ip->i_ioend_list);
        spin_lock_init(&ip->i_ioend_lock);

        return ip;
}

STATIC void
xfs_inode_free_callback(
        struct rcu_head         *head)
{
        struct inode            *inode = container_of(head, struct inode, i_rcu);
        struct xfs_inode        *ip = XFS_I(inode);

        switch (VFS_I(ip)->i_mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
        case S_IFLNK:
                xfs_idestroy_fork(&ip->i_df);
                break;
        }

        if (ip->i_afp) {
                xfs_idestroy_fork(ip->i_afp);
                kmem_cache_free(xfs_ifork_cache, ip->i_afp);
        }
        if (ip->i_cowfp) {
                xfs_idestroy_fork(ip->i_cowfp);
                kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
        }
        if (ip->i_itemp) {
                ASSERT(!test_bit(XFS_LI_IN_AIL,
                                 &ip->i_itemp->ili_item.li_flags));
                xfs_inode_item_destroy(ip);
                ip->i_itemp = NULL;
        }

        kmem_cache_free(xfs_inode_cache, ip);
}

static void
__xfs_inode_free(
        struct xfs_inode        *ip)
{
        /* asserts to verify all state is correct here */
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
        XFS_STATS_DEC(ip->i_mount, vn_active);

        call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
        struct xfs_inode        *ip)
{
        ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

        /*
         * Because we use RCU freeing we need to ensure the inode always
         * appears to be reclaimed with an invalid inode number when in the
         * free state. The ip->i_flags_lock provides the barrier against lookup
         * races.
         */
        spin_lock(&ip->i_flags_lock);
        ip->i_flags = XFS_IRECLAIM;
        ip->i_ino = 0;
        spin_unlock(&ip->i_flags_lock);

        __xfs_inode_free(ip);
}

/*
 * Queue background inode reclaim work if there are reclaimable inodes and there
 * isn't reclaim work already scheduled or in progress.
 */
static void
xfs_reclaim_work_queue(
        struct xfs_mount        *mp)
{

        rcu_read_lock();
        if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
                queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
                        msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
        }
        rcu_read_unlock();
}

/*
 * Background scanning to trim preallocated space. This is queued based on the
 * 'speculative_prealloc_lifetime' tunable (5m by default).
 */
static inline void
xfs_blockgc_queue(
        struct xfs_perag        *pag)
{
        struct xfs_mount        *mp = pag->pag_mount;

        if (!xfs_is_blockgc_enabled(mp))
                return;

        rcu_read_lock();
        if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
                queue_delayed_work(pag->pag_mount->m_blockgc_wq,
                                   &pag->pag_blockgc_work,
                                   msecs_to_jiffies(xfs_blockgc_secs * 1000));
        rcu_read_unlock();
}

/* Set a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_set_inode_tag(
        struct xfs_perag        *pag,
        xfs_agino_t             agino,
        unsigned int            tag)
{
        struct xfs_mount        *mp = pag->pag_mount;
        bool                    was_tagged;

        lockdep_assert_held(&pag->pag_ici_lock);

        was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
        radix_tree_tag_set(&pag->pag_ici_root, agino, tag);

        if (tag == XFS_ICI_RECLAIM_TAG)
                pag->pag_ici_reclaimable++;

        if (was_tagged)
                return;

        /* propagate the tag up into the perag radix tree */
        spin_lock(&mp->m_perag_lock);
        radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
        spin_unlock(&mp->m_perag_lock);

        /* start background work */
        switch (tag) {
        case XFS_ICI_RECLAIM_TAG:
                xfs_reclaim_work_queue(mp);
                break;
        case XFS_ICI_BLOCKGC_TAG:
                xfs_blockgc_queue(pag);
                break;
        }

        trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}

/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_clear_inode_tag(
        struct xfs_perag        *pag,
        xfs_agino_t             agino,
        unsigned int            tag)
{
        struct xfs_mount        *mp = pag->pag_mount;

        lockdep_assert_held(&pag->pag_ici_lock);

        /*
         * Reclaim can signal (with a null agino) that it cleared its own tag
         * by removing the inode from the radix tree.
         */
        if (agino != NULLAGINO)
                radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
        else
                ASSERT(tag == XFS_ICI_RECLAIM_TAG);

        if (tag == XFS_ICI_RECLAIM_TAG)
                pag->pag_ici_reclaimable--;

        if (radix_tree_tagged(&pag->pag_ici_root, tag))
                return;

        /* clear the tag from the perag radix tree */
        spin_lock(&mp->m_perag_lock);
        radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
        spin_unlock(&mp->m_perag_lock);

        trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
        struct xfs_mount        *mp,
        struct inode            *inode)
{
        int                     error;
        uint32_t                nlink = inode->i_nlink;
        uint32_t                generation = inode->i_generation;
        uint64_t                version = inode_peek_iversion(inode);
        umode_t                 mode = inode->i_mode;
        dev_t                   dev = inode->i_rdev;
        kuid_t                  uid = inode->i_uid;
        kgid_t                  gid = inode->i_gid;

        error = inode_init_always(mp->m_super, inode);

        set_nlink(inode, nlink);
        inode->i_generation = generation;
        inode_set_iversion_queried(inode, version);
        inode->i_mode = mode;
        inode->i_rdev = dev;
        inode->i_uid = uid;
        inode->i_gid = gid;
        mapping_set_large_folios(inode->i_mapping);
        return error;
}

/*
 * Carefully nudge an inode whose VFS state has been torn down back into a
 * usable state.  Drops the i_flags_lock and the rcu read lock.
 */
static int
xfs_iget_recycle(
        struct xfs_perag        *pag,
        struct xfs_inode        *ip) __releases(&ip->i_flags_lock)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct inode            *inode = VFS_I(ip);
        int                     error;

        trace_xfs_iget_recycle(ip);

        /*
         * We need to make it look like the inode is being reclaimed to prevent
         * the actual reclaim workers from stomping over us while we recycle
         * the inode.  We can't clear the radix tree tag yet as it requires
         * pag_ici_lock to be held exclusive.
         */
        ip->i_flags |= XFS_IRECLAIM;

        spin_unlock(&ip->i_flags_lock);
        rcu_read_unlock();

        ASSERT(!rwsem_is_locked(&inode->i_rwsem));
        error = xfs_reinit_inode(mp, inode);
        if (error) {
                /*
                 * Re-initializing the inode failed, and we are in deep
                 * trouble.  Try to re-add it to the reclaim list.
                 */
                rcu_read_lock();
                spin_lock(&ip->i_flags_lock);
                ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
                ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
                spin_unlock(&ip->i_flags_lock);
                rcu_read_unlock();

                trace_xfs_iget_recycle_fail(ip);
                return error;
        }

        spin_lock(&pag->pag_ici_lock);
        spin_lock(&ip->i_flags_lock);

        /*
         * Clear the per-lifetime state in the inode as we are now effectively
         * a new inode and need to return to the initial state before reuse
         * occurs.
         */
        ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
        ip->i_flags |= XFS_INEW;
        xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
                        XFS_ICI_RECLAIM_TAG);
        inode->i_state = I_NEW;
        spin_unlock(&ip->i_flags_lock);
        spin_unlock(&pag->pag_ici_lock);

        return 0;
}

/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *      0               if the inode free state matches the lookup context
 *      -ENOENT         if the inode is free and we are not allocating
 *      -EFSCORRUPTED   if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
        struct xfs_inode        *ip,
        int                     flags)
{
        if (flags & XFS_IGET_CREATE) {
                /* should be a free inode */
                if (VFS_I(ip)->i_mode != 0) {
                        xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
                                ip->i_ino, VFS_I(ip)->i_mode);
                        return -EFSCORRUPTED;
                }

                if (ip->i_nblocks != 0) {
                        xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
                                ip->i_ino);
                        return -EFSCORRUPTED;
                }
                return 0;
        }

        /* should be an allocated inode */
        if (VFS_I(ip)->i_mode == 0)
                return -ENOENT;

        return 0;
}

/* Make all pending inactivation work start immediately. */
static void
xfs_inodegc_queue_all(
        struct xfs_mount        *mp)
{
        struct xfs_inodegc      *gc;
        int                     cpu;

        for_each_online_cpu(cpu) {
                gc = per_cpu_ptr(mp->m_inodegc, cpu);
                if (!llist_empty(&gc->list))
                        queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
        }
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
        struct xfs_perag        *pag,
        struct xfs_inode        *ip,
        xfs_ino_t               ino,
        int                     flags,
        int                     lock_flags) __releases(RCU)
{
        struct inode            *inode = VFS_I(ip);
        struct xfs_mount        *mp = ip->i_mount;
        int                     error;

        /*
         * check for re-use of an inode within an RCU grace period due to the
         * radix tree nodes not being updated yet. We monitor for this by
         * setting the inode number to zero before freeing the inode structure.
         * If the inode has been reallocated and set up, then the inode number
         * will not match, so check for that, too.
         */
        spin_lock(&ip->i_flags_lock);
        if (ip->i_ino != ino)
                goto out_skip;

        /*
         * If we are racing with another cache hit that is currently
         * instantiating this inode or currently recycling it out of
         * reclaimable state, wait for the initialisation to complete
         * before continuing.
         *
         * If we're racing with the inactivation worker we also want to wait.
         * If we're creating a new file, it's possible that the worker
         * previously marked the inode as free on disk but hasn't finished
         * updating the incore state yet.  The AGI buffer will be dirty and
         * locked to the icreate transaction, so a synchronous push of the
         * inodegc workers would result in deadlock.  For a regular iget, the
         * worker is running already, so we might as well wait.
         *
         * XXX(hch): eventually we should do something equivalent to
         *           wait_on_inode to wait for these flags to be cleared
         *           instead of polling for it.
         */
        if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
                goto out_skip;

        if (ip->i_flags & XFS_NEED_INACTIVE) {
                /* Unlinked inodes cannot be re-grabbed. */
                if (VFS_I(ip)->i_nlink == 0) {
                        error = -ENOENT;
                        goto out_error;
                }
                goto out_inodegc_flush;
        }

        /*
         * Check the inode free state is valid. This also detects lookup
         * racing with unlinks.
         */
        error = xfs_iget_check_free_state(ip, flags);
        if (error)
                goto out_error;

        /* Skip inodes that have no vfs state. */
        if ((flags & XFS_IGET_INCORE) &&
            (ip->i_flags & XFS_IRECLAIMABLE))
                goto out_skip;

        /* The inode fits the selection criteria; process it. */
        if (ip->i_flags & XFS_IRECLAIMABLE) {
                /* Drops i_flags_lock and RCU read lock. */
                error = xfs_iget_recycle(pag, ip);
                if (error)
                        return error;
        } else {
                /* If the VFS inode is being torn down, pause and try again. */
                if (!igrab(inode))
                        goto out_skip;

                /* We've got a live one. */
                spin_unlock(&ip->i_flags_lock);
                rcu_read_unlock();
                trace_xfs_iget_hit(ip);
        }

        if (lock_flags != 0)
                xfs_ilock(ip, lock_flags);

        if (!(flags & XFS_IGET_INCORE))
                xfs_iflags_clear(ip, XFS_ISTALE);
        XFS_STATS_INC(mp, xs_ig_found);

        return 0;

out_skip:
        trace_xfs_iget_skip(ip);
        XFS_STATS_INC(mp, xs_ig_frecycle);
        error = -EAGAIN;
out_error:
        spin_unlock(&ip->i_flags_lock);
        rcu_read_unlock();
        return error;

out_inodegc_flush:
        spin_unlock(&ip->i_flags_lock);
        rcu_read_unlock();
        /*
         * Do not wait for the workers, because the caller could hold an AGI
         * buffer lock. We're just going to sleep in a loop anyway.
         */
        if (xfs_is_inodegc_enabled(mp))
                xfs_inodegc_queue_all(mp);
        return -EAGAIN;
}

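/*
 * Cache miss: allocate and initialise a new incore inode for this inode
 * number, reading it from disk if required, and insert it into the per-AG
 * inode cache radix tree.
 */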
static int
xfs_iget_cache_miss(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag,
        xfs_trans_t             *tp,
        xfs_ino_t               ino,
        struct xfs_inode        **ipp,
        int                     flags,
        int                     lock_flags)
{
        struct xfs_inode        *ip;
        int                     error;
        xfs_agino_t             agino = XFS_INO_TO_AGINO(mp, ino);
        int                     iflags;

        ip = xfs_inode_alloc(mp, ino);
        if (!ip)
                return -ENOMEM;

        error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
        if (error)
                goto out_destroy;

        /*
         * For version 5 superblocks, if we are initialising a new inode and we
         * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
         * simply build the new inode core with a random generation number.
         *
         * For version 4 (and older) superblocks, log recovery is dependent on
         * the i_flushiter field being initialised from the current on-disk
         * value and hence we must also read the inode off disk even when
         * initializing new inodes.
         */
        if (xfs_has_v3inodes(mp) &&
            (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
                VFS_I(ip)->i_generation = prandom_u32();
        } else {
                struct xfs_buf          *bp;

                error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
                if (error)
                        goto out_destroy;

                error = xfs_inode_from_disk(ip,
                                xfs_buf_offset(bp, ip->i_imap.im_boffset));
                if (!error)
                        xfs_buf_set_ref(bp, XFS_INO_REF);
                xfs_trans_brelse(tp, bp);

                if (error)
                        goto out_destroy;
        }

        trace_xfs_iget_miss(ip);

        /*
         * Check the inode free state is valid. This also detects lookup
         * racing with unlinks.
         */
        error = xfs_iget_check_free_state(ip, flags);
        if (error)
                goto out_destroy;

        /*
         * Preload the radix tree so we can insert safely under the
         * write spinlock. Note that we cannot sleep inside the preload
         * region. Since we can be called from transaction context, don't
         * recurse into the file system.
         */
        if (radix_tree_preload(GFP_NOFS)) {
                error = -EAGAIN;
                goto out_destroy;
        }

        /*
         * Because the inode hasn't been added to the radix-tree yet it can't
         * be found by another thread, so we can do the non-sleeping lock here.
         */
        if (lock_flags) {
                if (!xfs_ilock_nowait(ip, lock_flags))
                        BUG();
        }

        /*
         * These values must be set before inserting the inode into the radix
         * tree as the moment it is inserted a concurrent lookup (allowed by the
         * RCU locking mechanism) can find it and that lookup must see that this
         * is an inode currently under construction (i.e. that XFS_INEW is set).
         * The ip->i_flags_lock that protects the XFS_INEW flag forms the
         * memory barrier that ensures this detection works correctly at lookup
         * time.
         */
        iflags = XFS_INEW;
        if (flags & XFS_IGET_DONTCACHE)
                d_mark_dontcache(VFS_I(ip));
        ip->i_udquot = NULL;
        ip->i_gdquot = NULL;
        ip->i_pdquot = NULL;
        xfs_iflags_set(ip, iflags);

        /* insert the new inode */
        spin_lock(&pag->pag_ici_lock);
        error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
        if (unlikely(error)) {
                WARN_ON(error != -EEXIST);
                XFS_STATS_INC(mp, xs_ig_dup);
                error = -EAGAIN;
                goto out_preload_end;
        }
        spin_unlock(&pag->pag_ici_lock);
        radix_tree_preload_end();

        *ipp = ip;
        return 0;

out_preload_end:
        spin_unlock(&pag->pag_ici_lock);
        radix_tree_preload_end();
        if (lock_flags)
                xfs_iunlock(ip, lock_flags);
out_destroy:
        __destroy_inode(VFS_I(ip));
        xfs_inode_free(ip);
        return error;
}

/*
 * Look up an inode by number in the given file system. The inode is looked up
 * in the cache held in each AG. If the inode is found in the cache, initialise
 * the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to the
 * cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
 */
int
xfs_iget(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_ino_t               ino,
        uint                    flags,
        uint                    lock_flags,
        struct xfs_inode        **ipp)
{
        struct xfs_inode        *ip;
        struct xfs_perag        *pag;
        xfs_agino_t             agino;
        int                     error;

        ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

        /* reject inode numbers outside existing AGs */
        if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
                return -EINVAL;

        XFS_STATS_INC(mp, xs_ig_attempts);

        /* get the perag structure and ensure that it's inode capable */
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
        agino = XFS_INO_TO_AGINO(mp, ino);

again:
        error = 0;
        rcu_read_lock();
        ip = radix_tree_lookup(&pag->pag_ici_root, agino);

        if (ip) {
                error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
                if (error)
                        goto out_error_or_again;
        } else {
                rcu_read_unlock();
                if (flags & XFS_IGET_INCORE) {
                        error = -ENODATA;
                        goto out_error_or_again;
                }
                XFS_STATS_INC(mp, xs_ig_missed);

                error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
                                                        flags, lock_flags);
                if (error)
                        goto out_error_or_again;
        }
        xfs_perag_put(pag);

        *ipp = ip;

        /*
         * If we have a real type for an on-disk inode, we can setup the inode
         * now.  If it's a new inode being created, xfs_init_new_inode will
         * handle it.
         */
        if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
                xfs_setup_existing_inode(ip);
        return 0;

out_error_or_again:
        if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
                delay(1);
                goto again;
        }
        xfs_perag_put(pag);
        return error;
}

/*
 * "Is this a cached inode that's also allocated?"
 *
 * Look up an inode by number in the given file system.  If the inode is
 * in cache and isn't in purgatory, return 1 if the inode is allocated
 * and 0 if it is not.  For all other cases (not in cache, being torn
 * down, etc.), return a negative error code.
 *
 * The caller has to prevent inode allocation and freeing activity,
 * presumably by locking the AGI buffer.   This is to ensure that an
 * inode cannot transition from allocated to freed until the caller is
 * ready to allow that.  If the inode is in an intermediate state (new,
 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
 * inode is not in the cache, -ENOENT will be returned.  The caller must
 * deal with these scenarios appropriately.
 *
 * This is a specialized use case for the online scrubber; if you're
 * reading this, you probably want xfs_iget.
 */
int
xfs_icache_inode_is_allocated(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_ino_t               ino,
        bool                    *inuse)
{
        struct xfs_inode        *ip;
        int                     error;

        error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
        if (error)
                return error;

        *inuse = !!(VFS_I(ip)->i_mode);
        xfs_irele(ip);
        return 0;
}

/*
 * Grab the inode for reclaim exclusively.
 *
 * We have found this inode via a lookup under RCU, so the inode may have
 * already been freed, or it may be in the process of being recycled by
 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
 * will not be set. Hence we need to check for both these flag conditions to
 * avoid inodes that are no longer reclaim candidates.
 *
 * Note: checking for other state flags here, under the i_flags_lock or not, is
 * racy and should be avoided. Those races should be resolved only after we have
 * ensured that we are able to reclaim this inode and the world can see that we
 * are going to reclaim it.
 *
 * Return true if we grabbed it, false otherwise.
 */
static bool
xfs_reclaim_igrab(
        struct xfs_inode        *ip,
        struct xfs_icwalk       *icw)
{
        ASSERT(rcu_read_lock_held());

        spin_lock(&ip->i_flags_lock);
        if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
            __xfs_iflags_test(ip, XFS_IRECLAIM)) {
                /* not a reclaim candidate. */
                spin_unlock(&ip->i_flags_lock);
                return false;
        }

        /* Don't reclaim a sick inode unless the caller asked for it. */
        if (ip->i_sick &&
            (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
                spin_unlock(&ip->i_flags_lock);
                return false;
        }

        __xfs_iflags_set(ip, XFS_IRECLAIM);
        spin_unlock(&ip->i_flags_lock);
        return true;
}

/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
 * blocking anymore and hence we can wait for the inode to be able to reclaim
 * it.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push the
 * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
 * done in the background in a non-blocking manner, and enables memory reclaim
 * to make progress without blocking.
 */
static void
xfs_reclaim_inode(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag)
{
        xfs_ino_t               ino = ip->i_ino; /* for radix_tree_delete */

        if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
                goto out;
        if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
                goto out_iunlock;

        if (xfs_is_shutdown(ip->i_mount)) {
                xfs_iunpin_wait(ip);
                xfs_iflush_abort(ip);
                goto reclaim;
        }
        if (xfs_ipincount(ip))
                goto out_clear_flush;
        if (!xfs_inode_clean(ip))
                goto out_clear_flush;

        xfs_iflags_clear(ip, XFS_IFLUSHING);
reclaim:
        trace_xfs_inode_reclaiming(ip);

        /*
         * Because we use RCU freeing we need to ensure the inode always appears
         * to be reclaimed with an invalid inode number when in the free state.
         * We do this as early as possible under the ILOCK so that
         * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
         * detect races with us here. By doing this, we guarantee that once
         * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
         * it will see either a valid inode that will serialise correctly, or it
         * will see an invalid inode that it can skip.
         */
        spin_lock(&ip->i_flags_lock);
        ip->i_flags = XFS_IRECLAIM;
        ip->i_ino = 0;
        ip->i_sick = 0;
        ip->i_checked = 0;
        spin_unlock(&ip->i_flags_lock);

        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
        /*
         * Remove the inode from the per-AG radix tree.
         *
         * Because radix_tree_delete won't complain even if the item was never
         * added to the tree assert that it's been there before to catch
         * problems with the inode life time early on.
         */
        spin_lock(&pag->pag_ici_lock);
        if (!radix_tree_delete(&pag->pag_ici_root,
                                XFS_INO_TO_AGINO(ip->i_mount, ino)))
                ASSERT(0);
        xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
        spin_unlock(&pag->pag_ici_lock);

        /*
         * Here we do an (almost) spurious inode lock in order to coordinate
         * with inode cache radix tree lookups.  This is because the lookup
         * can reference the inodes in the cache without taking references.
         *
         * We make that OK here by ensuring that we wait until the inode is
         * unlocked after the lookup before we go ahead and free it.
         */
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        ASSERT(xfs_inode_clean(ip));

        __xfs_inode_free(ip);
        return;

out_clear_flush:
        xfs_iflags_clear(ip, XFS_IFLUSHING);
out_iunlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
        xfs_iflags_clear(ip, XFS_IRECLAIM);
}

/* Reclaim sick inodes if we're unmounting or the fs went down. */
static inline bool
xfs_want_reclaim_sick(
        struct xfs_mount        *mp)
{
        return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
               xfs_is_shutdown(mp);
}

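/*
 * Reclaim all reclaimable inodes synchronously, pushing the AIL so that dirty
 * inodes can be written back and then freed.
 */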
void
xfs_reclaim_inodes(
        struct xfs_mount        *mp)
{
        struct xfs_icwalk       icw = {
                .icw_flags      = 0,
        };

        if (xfs_want_reclaim_sick(mp))
                icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

        while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
                xfs_ail_push_all_sync(mp->m_ail);
                xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
        }
}

/*
 * The shrinker infrastructure determines how many inodes we should scan for
 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
 * push the AIL here. We also want to proactively free up memory if we can to
 * minimise the amount of work memory reclaim has to do so we kick the
 * background reclaim if it isn't already scheduled.
 */
long
xfs_reclaim_inodes_nr(
        struct xfs_mount        *mp,
        unsigned long           nr_to_scan)
{
        struct xfs_icwalk       icw = {
                .icw_flags      = XFS_ICWALK_FLAG_SCAN_LIMIT,
                .icw_scan_limit = min_t(unsigned long, LONG_MAX, nr_to_scan),
        };

        if (xfs_want_reclaim_sick(mp))
                icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

        /* kick background reclaimer and push the AIL */
        xfs_reclaim_work_queue(mp);
        xfs_ail_push_all(mp->m_ail);

        xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
        return 0;
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
long
xfs_reclaim_inodes_count(
        struct xfs_mount        *mp)
{
        struct xfs_perag        *pag;
        xfs_agnumber_t          ag = 0;
        long                    reclaimable = 0;

        while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
                ag = pag->pag_agno + 1;
                reclaimable += pag->pag_ici_reclaimable;
                xfs_perag_put(pag);
        }
        return reclaimable;
}

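/*
 * An intersection-based inode filtering algorithm. Process the inode only if
 * all of the criteria specified in @icw match.
 */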
STATIC bool
xfs_icwalk_match_id(
        struct xfs_inode        *ip,
        struct xfs_icwalk       *icw)
{
        if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
            !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
                return false;

        if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
            !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
                return false;

        if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
            ip->i_projid != icw->icw_prid)
                return false;

        return true;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC bool
xfs_icwalk_match_id_union(
        struct xfs_inode        *ip,
        struct xfs_icwalk       *icw)
{
        if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
            uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
                return true;

        if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
            gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
                return true;

        if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
            ip->i_projid == icw->icw_prid)
                return true;

        return false;
}

/*
 * Is this inode @ip eligible for eof/cow block reclamation, given some
 * filtering parameters @icw?  The inode is eligible if @icw is null or
 * if the predicate functions match.
 */
static bool
xfs_icwalk_match(
        struct xfs_inode        *ip,
        struct xfs_icwalk       *icw)
{
        bool                    match;

        if (!icw)
                return true;

        if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
                match = xfs_icwalk_match_id_union(ip, icw);
        else
                match = xfs_icwalk_match_id(ip, icw);
        if (!match)
                return false;

        /* skip the inode if the file size is too small */
        if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
            XFS_ISIZE(ip) < icw->icw_min_file_size)
                return false;

        return true;
}

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low.
 */
void
xfs_reclaim_worker(
        struct work_struct *work)
{
        struct xfs_mount *mp = container_of(to_delayed_work(work),
                                        struct xfs_mount, m_reclaim_work);

        xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
        xfs_reclaim_work_queue(mp);
}

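/*
 * Try to free post-EOF speculative preallocations from this inode if it
 * matches the filtering criteria in @icw. Returns -EAGAIN when the caller
 * asked for a synchronous scan but the inode could not be locked without
 * blocking.
 */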
STATIC int
xfs_inode_free_eofblocks(
        struct xfs_inode        *ip,
        struct xfs_icwalk       *icw,
        unsigned int            *lockflags)
{
        bool                    wait;

        wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

        if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
                return 0;

        /*
         * If the mapping is dirty the operation can block and wait for some
         * time. Unless we are waiting, skip it.
         */
        if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
                return 0;

        if (!xfs_icwalk_match(ip, icw))
                return 0;

        /*
         * If the caller is waiting, return -EAGAIN to keep the background
         * scanner moving and revisit the inode in a subsequent pass.
         */
        if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
                if (wait)
                        return -EAGAIN;
                return 0;
        }
        *lockflags |= XFS_IOLOCK_EXCL;

        if (xfs_can_free_eofblocks(ip, false))
                return xfs_free_eofblocks(ip);

        /* inode could be preallocated or append-only */
        trace_xfs_inode_free_eofblocks_invalid(ip);
        xfs_inode_clear_eofblocks_tag(ip);
        return 0;
}

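/*
 * Set a blockgc iflag on this inode and tag it in the per-AG radix tree so
 * that the background blockgc worker will find it.
 */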
static void
xfs_blockgc_set_iflag(
        struct xfs_inode        *ip,
        unsigned long           iflag)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_perag        *pag;

        ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

        /*
         * Don't bother locking the AG and looking up in the radix trees
         * if we already know that we have the tag set.
         */
        if (ip->i_flags & iflag)
                return;
        spin_lock(&ip->i_flags_lock);
        ip->i_flags |= iflag;
        spin_unlock(&ip->i_flags_lock);

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);

        xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
                        XFS_ICI_BLOCKGC_TAG);

        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
        xfs_inode_t     *ip)
{
        trace_xfs_inode_set_eofblocks_tag(ip);
        return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
}

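/*
 * Clear a blockgc iflag on this inode; drop the per-AG radix tree tag only
 * once no blockgc iflags remain set.
 */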
static void
xfs_blockgc_clear_iflag(
        struct xfs_inode        *ip,
        unsigned long           iflag)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_perag        *pag;
        bool                    clear_tag;

        ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

        spin_lock(&ip->i_flags_lock);
        ip->i_flags &= ~iflag;
        clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
        spin_unlock(&ip->i_flags_lock);

        if (!clear_tag)
                return;

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);

        xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
                        XFS_ICI_BLOCKGC_TAG);

        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
        xfs_inode_t     *ip)
{
        trace_xfs_inode_clear_eofblocks_tag(ip);
        return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
}
1230
1231/*
Darrick J. Wongbe78ff02018-01-16 19:03:59 -08001232 * Set ourselves up to free CoW blocks from this file. If it's already clean
1233 * then we can bail out quickly, but otherwise we must back off if the file
1234 * is undergoing some kind of write.
1235 */
1236static bool
1237xfs_prep_free_cowblocks(
Christoph Hellwig51d62692018-07-17 16:51:51 -07001238 struct xfs_inode *ip)
Darrick J. Wongbe78ff02018-01-16 19:03:59 -08001239{
1240 /*
1241 * Just clear the tag if we have an empty cow fork or none at all. It's
1242 * possible the inode was fully unshared since it was originally tagged.
1243 */
Christoph Hellwig51d62692018-07-17 16:51:51 -07001244 if (!xfs_inode_has_cow_data(ip)) {
Darrick J. Wongbe78ff02018-01-16 19:03:59 -08001245 trace_xfs_inode_free_cowblocks_invalid(ip);
1246 xfs_inode_clear_cowblocks_tag(ip);
1247 return false;
1248 }
1249
1250 /*
1251 * If the mapping is dirty or under writeback we cannot touch the
1252 * CoW fork. Leave it alone if we're in the midst of a directio.
1253 */
1254 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1255 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1256 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1257 atomic_read(&VFS_I(ip)->i_dio_count))
1258 return false;
1259
1260 return true;
1261}
1262
1263/*
Darrick J. Wong83104d42016-10-03 09:11:46 -07001264 * Automatic CoW Reservation Freeing
1265 *
1266 * These functions automatically garbage collect leftover CoW reservations
1267 * that were made on behalf of a cowextsize hint when we start to run out
1268 * of quota or when the reservations sit around for too long. If the file
1269 * has dirty pages or is undergoing writeback, its CoW reservations will
1270 * be retained.
1271 *
1272 * The actual garbage collection piggybacks off the same code that runs
1273 * the speculative EOF preallocation garbage collector.
1274 */
1275STATIC int
1276xfs_inode_free_cowblocks(
1277 struct xfs_inode *ip,
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001278 struct xfs_icwalk *icw,
Darrick J. Wong0fa4a102021-01-25 21:09:49 -08001279 unsigned int *lockflags)
Darrick J. Wong83104d42016-10-03 09:11:46 -07001280{
Darrick J. Wongf41a0712021-01-22 16:48:35 -08001281 bool wait;
Darrick J. Wongbe78ff02018-01-16 19:03:59 -08001282 int ret = 0;
Darrick J. Wong83104d42016-10-03 09:11:46 -07001283
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001284 wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
Darrick J. Wongf41a0712021-01-22 16:48:35 -08001285
Darrick J. Wongce2d3bb2021-01-22 16:48:43 -08001286 if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
1287 return 0;
1288
Christoph Hellwig51d62692018-07-17 16:51:51 -07001289 if (!xfs_prep_free_cowblocks(ip))
Darrick J. Wong83104d42016-10-03 09:11:46 -07001290 return 0;
1291
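	/* Skip inodes that don't match the caller's filter criteria, if any. */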
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001292 if (!xfs_icwalk_match(ip, icw))
Darrick J. Wonga91bf992020-05-21 13:08:48 -07001293 return 0;
Darrick J. Wong83104d42016-10-03 09:11:46 -07001294
Darrick J. Wongf41a0712021-01-22 16:48:35 -08001295 /*
1296 * If the caller is waiting, return -EAGAIN to keep the background
1297 * scanner moving and revisit the inode in a subsequent pass.
1298 */
Darrick J. Wong0fa4a102021-01-25 21:09:49 -08001299 if (!(*lockflags & XFS_IOLOCK_EXCL) &&
1300 !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
Darrick J. Wongf41a0712021-01-22 16:48:35 -08001301 if (wait)
1302 return -EAGAIN;
1303 return 0;
1304 }
Darrick J. Wong0fa4a102021-01-25 21:09:49 -08001305 *lockflags |= XFS_IOLOCK_EXCL;
1306
Darrick J. Wongf41a0712021-01-22 16:48:35 -08001307 if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
1308 if (wait)
Darrick J. Wong0fa4a102021-01-25 21:09:49 -08001309 return -EAGAIN;
1310 return 0;
Darrick J. Wongf41a0712021-01-22 16:48:35 -08001311 }
Darrick J. Wong0fa4a102021-01-25 21:09:49 -08001312 *lockflags |= XFS_MMAPLOCK_EXCL;
Darrick J. Wong83104d42016-10-03 09:11:46 -07001313
Darrick J. Wongbe78ff02018-01-16 19:03:59 -08001314 /*
1315	 * Check again; nobody else should be able to dirty blocks or change
1316 * the reflink iflag now that we have the first two locks held.
1317 */
Christoph Hellwig51d62692018-07-17 16:51:51 -07001318 if (xfs_prep_free_cowblocks(ip))
Darrick J. Wongbe78ff02018-01-16 19:03:59 -08001319 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
Darrick J. Wong83104d42016-10-03 09:11:46 -07001320 return ret;
1321}
1322
Darrick J. Wong83104d42016-10-03 09:11:46 -07001323void
1324xfs_inode_set_cowblocks_tag(
1325 xfs_inode_t *ip)
1326{
Brian Foster7b7381f2016-10-24 14:21:00 +11001327 trace_xfs_inode_set_cowblocks_tag(ip);
Darrick J. Wong9669f512021-01-22 16:48:43 -08001328 return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
Darrick J. Wong83104d42016-10-03 09:11:46 -07001329}
1330
1331void
1332xfs_inode_clear_cowblocks_tag(
1333 xfs_inode_t *ip)
1334{
Brian Foster7b7381f2016-10-24 14:21:00 +11001335 trace_xfs_inode_clear_cowblocks_tag(ip);
Darrick J. Wongce2d3bb2021-01-22 16:48:43 -08001336 return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
Darrick J. Wong83104d42016-10-03 09:11:46 -07001337}
Darrick J. Wongd6b636e2018-05-09 10:03:56 -07001338
1339/* Disable post-EOF and CoW block auto-reclamation. */
1340void
Darrick J. Wongc9a65262021-01-22 16:48:44 -08001341xfs_blockgc_stop(
Darrick J. Wongd6b636e2018-05-09 10:03:56 -07001342 struct xfs_mount *mp)
1343{
Darrick J. Wong894ecac2021-01-22 16:48:44 -08001344 struct xfs_perag *pag;
1345 xfs_agnumber_t agno;
1346
Darrick J. Wong6f649092021-08-06 11:05:42 -07001347 if (!xfs_clear_blockgc_enabled(mp))
1348 return;
1349
1350 for_each_perag(mp, agno, pag)
Darrick J. Wong894ecac2021-01-22 16:48:44 -08001351 cancel_delayed_work_sync(&pag->pag_blockgc_work);
Darrick J. Wong6f649092021-08-06 11:05:42 -07001352 trace_xfs_blockgc_stop(mp, __return_address);
Darrick J. Wongd6b636e2018-05-09 10:03:56 -07001353}
1354
1355/* Enable post-EOF and CoW block auto-reclamation. */
1356void
Darrick J. Wongc9a65262021-01-22 16:48:44 -08001357xfs_blockgc_start(
Darrick J. Wongd6b636e2018-05-09 10:03:56 -07001358 struct xfs_mount *mp)
1359{
Darrick J. Wong894ecac2021-01-22 16:48:44 -08001360 struct xfs_perag *pag;
1361 xfs_agnumber_t agno;
1362
Darrick J. Wong6f649092021-08-06 11:05:42 -07001363 if (xfs_set_blockgc_enabled(mp))
1364 return;
1365
1366 trace_xfs_blockgc_start(mp, __return_address);
Darrick J. Wong894ecac2021-01-22 16:48:44 -08001367 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1368 xfs_blockgc_queue(pag);
Darrick J. Wongd6b636e2018-05-09 10:03:56 -07001369}
Darrick J. Wong3d4feec2021-01-22 16:48:36 -08001370
Darrick J. Wongd20d5ed2021-06-01 23:01:44 -07001371/* Don't try to run block gc on an inode that's in any of these states. */
1372#define XFS_BLOCKGC_NOGRAB_IFLAGS (XFS_INEW | \
Dave Chinnerab23a772021-08-06 11:05:39 -07001373 XFS_NEED_INACTIVE | \
1374 XFS_INACTIVATING | \
Darrick J. Wongd20d5ed2021-06-01 23:01:44 -07001375 XFS_IRECLAIMABLE | \
1376 XFS_IRECLAIM)
Darrick J. Wongdf600192021-06-01 13:29:41 -07001377/*
Darrick J. Wongb9baaef2021-05-31 11:31:58 -07001378 * Decide if the given @ip is eligible for garbage collection of speculative
1379 * preallocations, and grab it if so. Returns true if it's ready to go or
1380 * false if we should just ignore it.
Darrick J. Wongdf600192021-06-01 13:29:41 -07001381 */
1382static bool
Darrick J. Wongb9baaef2021-05-31 11:31:58 -07001383xfs_blockgc_igrab(
Darrick J. Wong7fdff522021-05-31 11:31:59 -07001384 struct xfs_inode *ip)
Darrick J. Wongdf600192021-06-01 13:29:41 -07001385{
1386 struct inode *inode = VFS_I(ip);
Darrick J. Wongdf600192021-06-01 13:29:41 -07001387
1388 ASSERT(rcu_read_lock_held());
1389
1390 /* Check for stale RCU freed inode */
1391 spin_lock(&ip->i_flags_lock);
1392 if (!ip->i_ino)
1393 goto out_unlock_noent;
1394
Darrick J. Wongd20d5ed2021-06-01 23:01:44 -07001395 if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
Darrick J. Wongdf600192021-06-01 13:29:41 -07001396 goto out_unlock_noent;
1397 spin_unlock(&ip->i_flags_lock);
1398
1399 /* nothing to sync during shutdown */
Dave Chinner75c8c50f2021-08-18 18:46:53 -07001400 if (xfs_is_shutdown(ip->i_mount))
Darrick J. Wongdf600192021-06-01 13:29:41 -07001401 return false;
1402
1403	/* If we can't grab the inode, it must be on its way to reclaim. */
1404 if (!igrab(inode))
1405 return false;
1406
1407 /* inode is valid */
1408 return true;
1409
1410out_unlock_noent:
1411 spin_unlock(&ip->i_flags_lock);
1412 return false;
1413}
1414
Darrick J. Wong41956752021-01-22 16:48:43 -08001415/* Scan one incore inode for block preallocations that we can remove. */
1416static int
1417xfs_blockgc_scan_inode(
1418 struct xfs_inode *ip,
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001419 struct xfs_icwalk *icw)
Darrick J. Wong85c5b272021-01-22 16:48:39 -08001420{
Darrick J. Wong0fa4a102021-01-25 21:09:49 -08001421 unsigned int lockflags = 0;
Darrick J. Wong85c5b272021-01-22 16:48:39 -08001422 int error;
1423
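	/*
	 * Each helper below takes locks opportunistically and records whatever
	 * it took in lockflags, so we drop everything exactly once on the way
	 * out.
	 */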
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001424 error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
Darrick J. Wong85c5b272021-01-22 16:48:39 -08001425 if (error)
Darrick J. Wong0fa4a102021-01-25 21:09:49 -08001426 goto unlock;
Darrick J. Wong85c5b272021-01-22 16:48:39 -08001427
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001428 error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
Darrick J. Wong0fa4a102021-01-25 21:09:49 -08001429unlock:
1430 if (lockflags)
1431 xfs_iunlock(ip, lockflags);
Darrick J. Wong594ab002021-05-31 11:32:00 -07001432 xfs_irele(ip);
Darrick J. Wong0fa4a102021-01-25 21:09:49 -08001433 return error;
Darrick J. Wong85c5b272021-01-22 16:48:39 -08001434}
1435
Darrick J. Wong9669f512021-01-22 16:48:43 -08001436/* Background worker that trims preallocated space. */
1437void
1438xfs_blockgc_worker(
1439 struct work_struct *work)
1440{
Darrick J. Wong894ecac2021-01-22 16:48:44 -08001441 struct xfs_perag *pag = container_of(to_delayed_work(work),
1442 struct xfs_perag, pag_blockgc_work);
1443 struct xfs_mount *mp = pag->pag_mount;
Darrick J. Wong9669f512021-01-22 16:48:43 -08001444 int error;
1445
Darrick J. Wong6f649092021-08-06 11:05:42 -07001446 trace_xfs_blockgc_worker(mp, __return_address);
1447
Darrick J. Wongf427cf52021-05-31 11:32:00 -07001448 error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
Darrick J. Wong9669f512021-01-22 16:48:43 -08001449 if (error)
Darrick J. Wong894ecac2021-01-22 16:48:44 -08001450 xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
1451 pag->pag_agno, error);
Darrick J. Wong894ecac2021-01-22 16:48:44 -08001452 xfs_blockgc_queue(pag);
Darrick J. Wong9669f512021-01-22 16:48:43 -08001453}
1454
Darrick J. Wong85c5b272021-01-22 16:48:39 -08001455/*
Darrick J. Wong2eb66502021-08-06 11:05:41 -07001456 * Try to free space in the filesystem by purging inactive inodes, eofblocks
1457 * and cowblocks.
Darrick J. Wong85c5b272021-01-22 16:48:39 -08001458 */
1459int
1460xfs_blockgc_free_space(
1461 struct xfs_mount *mp,
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001462 struct xfs_icwalk *icw)
Darrick J. Wong85c5b272021-01-22 16:48:39 -08001463{
Darrick J. Wong2eb66502021-08-06 11:05:41 -07001464 int error;
1465
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001466 trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);
Darrick J. Wong85c5b272021-01-22 16:48:39 -08001467
Darrick J. Wong2eb66502021-08-06 11:05:41 -07001468 error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
1469 if (error)
1470 return error;
1471
1472 xfs_inodegc_flush(mp);
1473 return 0;
Darrick J. Wong85c5b272021-01-22 16:48:39 -08001474}
1475
Darrick J. Wong3d4feec2021-01-22 16:48:36 -08001476/*
Darrick J. Wonge8d04c22021-08-06 11:05:42 -07001477 * Reclaim all the free space that we can by scheduling the background blockgc
1478 * and inodegc workers immediately and waiting for them all to clear.
1479 */
1480void
1481xfs_blockgc_flush_all(
1482 struct xfs_mount *mp)
1483{
1484 struct xfs_perag *pag;
1485 xfs_agnumber_t agno;
1486
1487 trace_xfs_blockgc_flush_all(mp, __return_address);
1488
1489 /*
1490 * For each blockgc worker, move its queue time up to now. If it
1491 * wasn't queued, it will not be requeued. Then flush whatever's
1492 * left.
1493 */
1494 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1495 mod_delayed_work(pag->pag_mount->m_blockgc_wq,
1496 &pag->pag_blockgc_work, 0);
1497
1498 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1499 flush_delayed_work(&pag->pag_blockgc_work);
1500
1501 xfs_inodegc_flush(mp);
1502}
1503
1504/*
Darrick J. Wongc237dd72021-01-22 16:48:37 -08001505 * Run cow/eofblocks scans on the supplied dquots. We don't know exactly which
1506 * quota caused an allocation failure, so we make a best effort by including
1507 * each quota under low free space conditions (less than 1% free space) in the
1508 * scan.
Darrick J. Wong111068f2021-01-22 16:48:36 -08001509 *
1510 * Callers must not hold any inode's ILOCK. If requesting a synchronous scan
Darrick J. Wong2d53f66b2021-06-07 09:34:51 -07001511 * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
Darrick J. Wong111068f2021-01-22 16:48:36 -08001512 * MMAPLOCK.
Darrick J. Wong3d4feec2021-01-22 16:48:36 -08001513 */
Darrick J. Wong111068f2021-01-22 16:48:36 -08001514int
Darrick J. Wongc237dd72021-01-22 16:48:37 -08001515xfs_blockgc_free_dquots(
1516 struct xfs_mount *mp,
1517 struct xfs_dquot *udqp,
1518 struct xfs_dquot *gdqp,
1519 struct xfs_dquot *pdqp,
Darrick J. Wong2d53f66b2021-06-07 09:34:51 -07001520 unsigned int iwalk_flags)
Darrick J. Wong3d4feec2021-01-22 16:48:36 -08001521{
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001522 struct xfs_icwalk icw = {0};
Darrick J. Wong3d4feec2021-01-22 16:48:36 -08001523 bool do_work = false;
1524
Darrick J. Wongc237dd72021-01-22 16:48:37 -08001525 if (!udqp && !gdqp && !pdqp)
1526 return 0;
1527
Darrick J. Wong3d4feec2021-01-22 16:48:36 -08001528 /*
Darrick J. Wong111068f2021-01-22 16:48:36 -08001529 * Run a scan to free blocks using the union filter to cover all
1530 * applicable quotas in a single scan.
Darrick J. Wong3d4feec2021-01-22 16:48:36 -08001531 */
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001532 icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;
Darrick J. Wong3d4feec2021-01-22 16:48:36 -08001533
Darrick J. Wongc237dd72021-01-22 16:48:37 -08001534 if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001535 icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
1536 icw.icw_flags |= XFS_ICWALK_FLAG_UID;
Darrick J. Wongc237dd72021-01-22 16:48:37 -08001537 do_work = true;
Darrick J. Wong3d4feec2021-01-22 16:48:36 -08001538 }
1539
Darrick J. Wongc237dd72021-01-22 16:48:37 -08001540 if (XFS_IS_UQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001541 icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
1542 icw.icw_flags |= XFS_ICWALK_FLAG_GID;
Darrick J. Wongc237dd72021-01-22 16:48:37 -08001543 do_work = true;
Darrick J. Wong3d4feec2021-01-22 16:48:36 -08001544 }
1545
Darrick J. Wongc237dd72021-01-22 16:48:37 -08001546 if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001547 icw.icw_prid = pdqp->q_id;
1548 icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
Darrick J. Wongc237dd72021-01-22 16:48:37 -08001549 do_work = true;
Darrick J. Wong3d4feec2021-01-22 16:48:36 -08001550 }
1551
1552 if (!do_work)
Darrick J. Wong111068f2021-01-22 16:48:36 -08001553 return 0;
Darrick J. Wong3d4feec2021-01-22 16:48:36 -08001554
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001555 return xfs_blockgc_free_space(mp, &icw);
Darrick J. Wongc237dd72021-01-22 16:48:37 -08001556}
1557
1558/* Run cow/eofblocks scans on the quotas attached to the inode. */
1559int
1560xfs_blockgc_free_quota(
1561 struct xfs_inode *ip,
Darrick J. Wong2d53f66b2021-06-07 09:34:51 -07001562 unsigned int iwalk_flags)
Darrick J. Wongc237dd72021-01-22 16:48:37 -08001563{
1564 return xfs_blockgc_free_dquots(ip->i_mount,
1565 xfs_inode_dquot(ip, XFS_DQTYPE_USER),
1566 xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
Darrick J. Wong2d53f66b2021-06-07 09:34:51 -07001567 xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
Darrick J. Wong3d4feec2021-01-22 16:48:36 -08001568}
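
/*
 * Illustrative sketch of the intended calling convention (not lifted from any
 * specific caller): a reservation path that fails with -EDQUOT or -ENOSPC is
 * expected to run one blockgc pass against the inode's dquots and then retry
 * the reservation exactly once, e.g.:
 *
 *	bool	retried = false;
 *
 * retry:
 *	error = ...attempt the block/quota reservation...;
 *	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
 *		xfs_blockgc_free_quota(ip, 0);
 *		retried = true;
 *		goto retry;
 *	}
 */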
Darrick J. Wongdf600192021-06-01 13:29:41 -07001569
1570/* XFS Inode Cache Walking Code */
1571
1572/*
Darrick J. Wongf1bc5c52021-05-31 11:32:02 -07001573 * The inode lookup is done in batches to keep the amount of lock traffic and
1574 * radix tree lookups to a minimum. The batch size is a trade off between
1575 * lookup reduction and stack usage. This is in the reclaim path, so we can't
1576 * be too greedy.
1577 */
1578#define XFS_LOOKUP_BATCH 32
1579
1580
1581/*
Darrick J. Wongb9baaef2021-05-31 11:31:58 -07001582 * Decide if we want to grab this inode in anticipation of doing work towards
Darrick J. Wong594ab002021-05-31 11:32:00 -07001583 * the goal.
Darrick J. Wongb9baaef2021-05-31 11:31:58 -07001584 */
1585static inline bool
1586xfs_icwalk_igrab(
1587 enum xfs_icwalk_goal goal,
Darrick J. Wong94927502021-06-07 09:34:50 -07001588 struct xfs_inode *ip,
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001589 struct xfs_icwalk *icw)
Darrick J. Wongb9baaef2021-05-31 11:31:58 -07001590{
1591 switch (goal) {
Darrick J. Wongb9baaef2021-05-31 11:31:58 -07001592 case XFS_ICWALK_BLOCKGC:
Darrick J. Wong7fdff522021-05-31 11:31:59 -07001593 return xfs_blockgc_igrab(ip);
Darrick J. Wongf1bc5c52021-05-31 11:32:02 -07001594 case XFS_ICWALK_RECLAIM:
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001595 return xfs_reclaim_igrab(ip, icw);
Darrick J. Wongb9baaef2021-05-31 11:31:58 -07001596 default:
1597 return false;
1598 }
1599}
1600
Darrick J. Wong594ab002021-05-31 11:32:00 -07001601/*
1602 * Process an inode. Each processing function must handle any state changes
1603 * made by the icwalk igrab function. Return -EAGAIN to skip an inode.
1604 */
Darrick J. Wongf427cf52021-05-31 11:32:00 -07001605static inline int
1606xfs_icwalk_process_inode(
1607 enum xfs_icwalk_goal goal,
1608 struct xfs_inode *ip,
Darrick J. Wongf1bc5c52021-05-31 11:32:02 -07001609 struct xfs_perag *pag,
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001610 struct xfs_icwalk *icw)
Darrick J. Wongf427cf52021-05-31 11:32:00 -07001611{
Darrick J. Wong594ab002021-05-31 11:32:00 -07001612 int error = 0;
Darrick J. Wongf427cf52021-05-31 11:32:00 -07001613
1614 switch (goal) {
Darrick J. Wongf427cf52021-05-31 11:32:00 -07001615 case XFS_ICWALK_BLOCKGC:
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001616 error = xfs_blockgc_scan_inode(ip, icw);
Darrick J. Wongf427cf52021-05-31 11:32:00 -07001617 break;
Darrick J. Wongf1bc5c52021-05-31 11:32:02 -07001618 case XFS_ICWALK_RECLAIM:
1619 xfs_reclaim_inode(ip, pag);
1620 break;
Darrick J. Wongf427cf52021-05-31 11:32:00 -07001621 }
Darrick J. Wongf427cf52021-05-31 11:32:00 -07001622 return error;
1623}
1624
Darrick J. Wongb9baaef2021-05-31 11:31:58 -07001625/*
Darrick J. Wongf427cf52021-05-31 11:32:00 -07001626 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
1627 * process them in some manner.
Darrick J. Wongdf600192021-06-01 13:29:41 -07001628 */
1629static int
Darrick J. Wongc1115c02021-06-01 22:41:25 -07001630xfs_icwalk_ag(
Darrick J. Wongdf600192021-06-01 13:29:41 -07001631 struct xfs_perag *pag,
Darrick J. Wongf427cf52021-05-31 11:32:00 -07001632 enum xfs_icwalk_goal goal,
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001633 struct xfs_icwalk *icw)
Darrick J. Wongdf600192021-06-01 13:29:41 -07001634{
1635 struct xfs_mount *mp = pag->pag_mount;
1636 uint32_t first_index;
1637 int last_error = 0;
1638 int skipped;
1639 bool done;
1640 int nr_found;
1641
1642restart:
1643 done = false;
1644 skipped = 0;
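	/*
	 * Reclaim walks resume from the per-AG cursor; every other goal starts
	 * from the beginning of the AG.
	 */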
Darrick J. Wongf1bc5c52021-05-31 11:32:02 -07001645 if (goal == XFS_ICWALK_RECLAIM)
1646 first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1647 else
1648 first_index = 0;
Darrick J. Wongdf600192021-06-01 13:29:41 -07001649 nr_found = 0;
1650 do {
1651 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1652 int error = 0;
1653 int i;
1654
1655 rcu_read_lock();
1656
Christoph Hellwiga437b9b2021-08-13 09:16:52 -07001657 nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
1658 (void **) batch, first_index,
1659 XFS_LOOKUP_BATCH, goal);
Darrick J. Wongdf600192021-06-01 13:29:41 -07001660 if (!nr_found) {
Darrick J. Wongf1bc5c52021-05-31 11:32:02 -07001661 done = true;
Darrick J. Wongdf600192021-06-01 13:29:41 -07001662 rcu_read_unlock();
1663 break;
1664 }
1665
1666 /*
1667		 * Grab the inodes before we drop the lock. If we found
1668 * nothing, nr == 0 and the loop will be skipped.
1669 */
1670 for (i = 0; i < nr_found; i++) {
1671 struct xfs_inode *ip = batch[i];
1672
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001673 if (done || !xfs_icwalk_igrab(goal, ip, icw))
Darrick J. Wongdf600192021-06-01 13:29:41 -07001674 batch[i] = NULL;
1675
1676 /*
1677 * Update the index for the next lookup. Catch
1678 * overflows into the next AG range which can occur if
1679 * we have inodes in the last block of the AG and we
1680 * are currently pointing to the last inode.
1681 *
1682 * Because we may see inodes that are from the wrong AG
1683 * due to RCU freeing and reallocation, only update the
1684			 * index if it lies in this AG. It was a race that led
1685 * us to see this inode, so another lookup from the
1686 * same index will not find it again.
1687 */
1688 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
1689 continue;
1690 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1691 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1692 done = true;
1693 }
1694
1695		/* unlock now that we've grabbed the inodes. */
1696 rcu_read_unlock();
1697
1698 for (i = 0; i < nr_found; i++) {
1699 if (!batch[i])
1700 continue;
Darrick J. Wongf1bc5c52021-05-31 11:32:02 -07001701 error = xfs_icwalk_process_inode(goal, batch[i], pag,
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001702 icw);
Darrick J. Wongdf600192021-06-01 13:29:41 -07001703 if (error == -EAGAIN) {
1704 skipped++;
1705 continue;
1706 }
1707 if (error && last_error != -EFSCORRUPTED)
1708 last_error = error;
1709 }
1710
1711 /* bail out if the filesystem is corrupted. */
1712 if (error == -EFSCORRUPTED)
1713 break;
1714
1715 cond_resched();
1716
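		/*
		 * Enforce the caller's optional scan limit; the limit is
		 * charged in whole lookup batches.
		 */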
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001717 if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
1718 icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
1719 if (icw->icw_scan_limit <= 0)
Darrick J. Wongf1bc5c52021-05-31 11:32:02 -07001720 break;
1721 }
Darrick J. Wongdf600192021-06-01 13:29:41 -07001722 } while (nr_found && !done);
1723
Darrick J. Wongf1bc5c52021-05-31 11:32:02 -07001724 if (goal == XFS_ICWALK_RECLAIM) {
1725 if (done)
1726 first_index = 0;
1727 WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1728 }
1729
Darrick J. Wongdf600192021-06-01 13:29:41 -07001730 if (skipped) {
1731 delay(1);
1732 goto restart;
1733 }
1734 return last_error;
1735}
1736
Darrick J. Wongf427cf52021-05-31 11:32:00 -07001737/* Walk all incore inodes to achieve a given goal. */
Darrick J. Wongdf600192021-06-01 13:29:41 -07001738static int
Darrick J. Wongc1115c02021-06-01 22:41:25 -07001739xfs_icwalk(
Darrick J. Wongdf600192021-06-01 13:29:41 -07001740 struct xfs_mount *mp,
Darrick J. Wongf427cf52021-05-31 11:32:00 -07001741 enum xfs_icwalk_goal goal,
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001742 struct xfs_icwalk *icw)
Darrick J. Wongdf600192021-06-01 13:29:41 -07001743{
1744 struct xfs_perag *pag;
1745 int error = 0;
1746 int last_error = 0;
Christoph Hellwiga437b9b2021-08-13 09:16:52 -07001747 xfs_agnumber_t agno;
Darrick J. Wongdf600192021-06-01 13:29:41 -07001748
Christoph Hellwiga437b9b2021-08-13 09:16:52 -07001749 for_each_perag_tag(mp, agno, pag, goal) {
Darrick J. Wongb26b2bf2021-06-07 09:34:51 -07001750 error = xfs_icwalk_ag(pag, goal, icw);
Darrick J. Wongdf600192021-06-01 13:29:41 -07001751 if (error) {
1752 last_error = error;
Christoph Hellwiga437b9b2021-08-13 09:16:52 -07001753 if (error == -EFSCORRUPTED) {
1754 xfs_perag_put(pag);
Darrick J. Wongdf600192021-06-01 13:29:41 -07001755 break;
Christoph Hellwiga437b9b2021-08-13 09:16:52 -07001756 }
Darrick J. Wongdf600192021-06-01 13:29:41 -07001757 }
1758 }
1759 return last_error;
Darrick J. Wong2d53f66b2021-06-07 09:34:51 -07001760 BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
Darrick J. Wongdf600192021-06-01 13:29:41 -07001761}
Darrick J. Wongc6c20662021-08-06 11:05:38 -07001762
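/*
 * Debug check: by the time an inode is being torn down it should not have any
 * delayed allocation extents left in its data or CoW forks; warn about any
 * that remain.
 */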
1763#ifdef DEBUG
1764static void
1765xfs_check_delalloc(
1766 struct xfs_inode *ip,
1767 int whichfork)
1768{
1769 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1770 struct xfs_bmbt_irec got;
1771 struct xfs_iext_cursor icur;
1772
1773 if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
1774 return;
1775 do {
1776 if (isnullstartblock(got.br_startblock)) {
1777 xfs_warn(ip->i_mount,
1778 "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
1779 ip->i_ino,
1780 whichfork == XFS_DATA_FORK ? "data" : "cow",
1781 got.br_startoff, got.br_blockcount);
1782 }
1783 } while (xfs_iext_next_extent(ifp, &icur, &got));
1784}
1785#else
1786#define xfs_check_delalloc(ip, whichfork) do { } while (0)
1787#endif
1788
Dave Chinnerab23a772021-08-06 11:05:39 -07001789/* Schedule the inode for reclaim. */
1790static void
1791xfs_inodegc_set_reclaimable(
Darrick J. Wongc6c20662021-08-06 11:05:38 -07001792 struct xfs_inode *ip)
1793{
1794 struct xfs_mount *mp = ip->i_mount;
1795 struct xfs_perag *pag;
Darrick J. Wongc6c20662021-08-06 11:05:38 -07001796
Dave Chinner75c8c50f2021-08-18 18:46:53 -07001797 if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
Darrick J. Wongc6c20662021-08-06 11:05:38 -07001798 xfs_check_delalloc(ip, XFS_DATA_FORK);
1799 xfs_check_delalloc(ip, XFS_COW_FORK);
1800 ASSERT(0);
1801 }
1802
Darrick J. Wongc6c20662021-08-06 11:05:38 -07001803 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1804 spin_lock(&pag->pag_ici_lock);
1805 spin_lock(&ip->i_flags_lock);
1806
Dave Chinnerab23a772021-08-06 11:05:39 -07001807 trace_xfs_inode_set_reclaimable(ip);
1808 ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
1809 ip->i_flags |= XFS_IRECLAIMABLE;
Darrick J. Wongc6c20662021-08-06 11:05:38 -07001810 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1811 XFS_ICI_RECLAIM_TAG);
Darrick J. Wongc6c20662021-08-06 11:05:38 -07001812
1813 spin_unlock(&ip->i_flags_lock);
1814 spin_unlock(&pag->pag_ici_lock);
1815 xfs_perag_put(pag);
1816}
Dave Chinnerab23a772021-08-06 11:05:39 -07001817
1818/*
1819 * Free all speculative preallocations and possibly even the inode itself.
1820 * This is the last chance to make changes to an otherwise unreferenced file
1821 * before incore reclamation happens.
1822 */
1823static void
1824xfs_inodegc_inactivate(
1825 struct xfs_inode *ip)
1826{
1827 trace_xfs_inode_inactivating(ip);
1828 xfs_inactive(ip);
1829 xfs_inodegc_set_reclaimable(ip);
1830}
1831
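/*
 * Inactivate the inodes queued on this CPU's lockless list, then mark them
 * reclaimable so that background reclaim can free them.
 */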
1832void
1833xfs_inodegc_worker(
1834 struct work_struct *work)
1835{
1836 struct xfs_inodegc *gc = container_of(work, struct xfs_inodegc,
1837 work);
1838 struct llist_node *node = llist_del_all(&gc->list);
1839 struct xfs_inode *ip, *n;
1840
1841 WRITE_ONCE(gc->items, 0);
1842
1843 if (!node)
1844 return;
1845
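	/*
	 * Peek at the first entry only to get at the mount point for the
	 * tracepoint; the loop below processes every inode on the list.
	 */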
1846 ip = llist_entry(node, struct xfs_inode, i_gclist);
Darrick J. Wong40b1de0072021-08-06 11:05:43 -07001847 trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));
Dave Chinnerab23a772021-08-06 11:05:39 -07001848
Darrick J. Wong40b1de0072021-08-06 11:05:43 -07001849 WRITE_ONCE(gc->shrinker_hits, 0);
Dave Chinnerab23a772021-08-06 11:05:39 -07001850 llist_for_each_entry_safe(ip, n, node, i_gclist) {
1851 xfs_iflags_set(ip, XFS_INACTIVATING);
1852 xfs_inodegc_inactivate(ip);
1853 }
1854}
1855
1856/*
Brian Foster6191cf32022-01-18 11:32:35 -08001857 * Force all currently queued inode inactivation work to run immediately and
1858 * wait for the work to finish.
Dave Chinnerab23a772021-08-06 11:05:39 -07001859 */
1860void
1861xfs_inodegc_flush(
1862 struct xfs_mount *mp)
1863{
Dave Chinnerab23a772021-08-06 11:05:39 -07001864 if (!xfs_is_inodegc_enabled(mp))
1865 return;
1866
1867 trace_xfs_inodegc_flush(mp, __return_address);
1868
1869 xfs_inodegc_queue_all(mp);
Brian Foster6191cf32022-01-18 11:32:35 -08001870 flush_workqueue(mp->m_inodegc_wq);
Dave Chinnerab23a772021-08-06 11:05:39 -07001871}
1872
1873/*
1874 * Flush all the pending work and then disable the inode inactivation background
1875 * workers and wait for them to stop.
1876 */
1877void
1878xfs_inodegc_stop(
1879 struct xfs_mount *mp)
1880{
Dave Chinnerab23a772021-08-06 11:05:39 -07001881 if (!xfs_clear_inodegc_enabled(mp))
1882 return;
1883
1884 xfs_inodegc_queue_all(mp);
Brian Foster6191cf32022-01-18 11:32:35 -08001885 drain_workqueue(mp->m_inodegc_wq);
Dave Chinnerab23a772021-08-06 11:05:39 -07001886
Dave Chinnerab23a772021-08-06 11:05:39 -07001887 trace_xfs_inodegc_stop(mp, __return_address);
1888}
1889
1890/*
1891 * Enable the inode inactivation background workers and schedule deferred inode
1892 * inactivation work if there is any.
1893 */
1894void
1895xfs_inodegc_start(
1896 struct xfs_mount *mp)
1897{
1898 if (xfs_set_inodegc_enabled(mp))
1899 return;
1900
1901 trace_xfs_inodegc_start(mp, __return_address);
1902 xfs_inodegc_queue_all(mp);
1903}
1904
Darrick J. Wong65f03d82021-08-06 11:05:41 -07001905#ifdef CONFIG_XFS_RT
1906static inline bool
1907xfs_inodegc_want_queue_rt_file(
1908 struct xfs_inode *ip)
1909{
1910 struct xfs_mount *mp = ip->i_mount;
1911 uint64_t freertx;
1912
1913 if (!XFS_IS_REALTIME_INODE(ip))
1914 return false;
1915
1916 freertx = READ_ONCE(mp->m_sb.sb_frextents);
1917 return freertx < mp->m_low_rtexts[XFS_LOWSP_5_PCNT];
1918}
1919#else
1920# define xfs_inodegc_want_queue_rt_file(ip) (false)
1921#endif /* CONFIG_XFS_RT */
1922
Dave Chinnerab23a772021-08-06 11:05:39 -07001923/*
1924 * Schedule the inactivation worker when:
1925 *
1926 * - We've accumulated more than one inode cluster buffer's worth of inodes.
Darrick J. Wong7d6f07d2021-08-06 11:05:40 -07001927 * - There is less than 5% free space left.
Darrick J. Wong108523b2021-08-06 11:05:40 -07001928 * - Any of the quotas for this inode are near an enforcement limit.
Dave Chinnerab23a772021-08-06 11:05:39 -07001929 */
1930static inline bool
1931xfs_inodegc_want_queue_work(
1932 struct xfs_inode *ip,
1933 unsigned int items)
1934{
1935 struct xfs_mount *mp = ip->i_mount;
1936
1937 if (items > mp->m_ino_geo.inodes_per_cluster)
1938 return true;
1939
Darrick J. Wong7d6f07d2021-08-06 11:05:40 -07001940 if (__percpu_counter_compare(&mp->m_fdblocks,
1941 mp->m_low_space[XFS_LOWSP_5_PCNT],
1942 XFS_FDBLOCKS_BATCH) < 0)
1943 return true;
1944
Darrick J. Wong65f03d82021-08-06 11:05:41 -07001945 if (xfs_inodegc_want_queue_rt_file(ip))
1946 return true;
1947
Darrick J. Wong108523b2021-08-06 11:05:40 -07001948 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
1949 return true;
1950
1951 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
1952 return true;
1953
1954 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
1955 return true;
1956
Dave Chinnerab23a772021-08-06 11:05:39 -07001957 return false;
1958}
1959
1960/*
1961 * Upper bound on the number of inodes in each AG that can be queued for
1962 * inactivation at any given time, to avoid monopolizing the workqueue.
1963 */
1964#define XFS_INODEGC_MAX_BACKLOG (4 * XFS_INODES_PER_CHUNK)
1965
1966/*
1967 * Make the frontend wait for inactivations when:
1968 *
Darrick J. Wong40b1de0072021-08-06 11:05:43 -07001969 * - Memory shrinkers queued the inactivation worker and it hasn't finished.
Dave Chinnerab23a772021-08-06 11:05:39 -07001970 * - The queue depth exceeds the maximum allowable percpu backlog.
1971 *
1972 * Note: If the current thread is running a transaction, we don't ever want to
1973 * wait for other transactions because that could introduce a deadlock.
1974 */
1975static inline bool
1976xfs_inodegc_want_flush_work(
1977 struct xfs_inode *ip,
Darrick J. Wong40b1de0072021-08-06 11:05:43 -07001978 unsigned int items,
1979 unsigned int shrinker_hits)
Dave Chinnerab23a772021-08-06 11:05:39 -07001980{
1981 if (current->journal_info)
1982 return false;
1983
Darrick J. Wong40b1de0072021-08-06 11:05:43 -07001984 if (shrinker_hits > 0)
1985 return true;
1986
Dave Chinnerab23a772021-08-06 11:05:39 -07001987 if (items > XFS_INODEGC_MAX_BACKLOG)
1988 return true;
1989
1990 return false;
1991}
1992
1993/*
1994 * Queue a background inactivation worker if there are inodes that need to be
1995 * inactivated and higher level xfs code hasn't disabled the background
1996 * workers.
1997 */
1998static void
1999xfs_inodegc_queue(
2000 struct xfs_inode *ip)
2001{
2002 struct xfs_mount *mp = ip->i_mount;
2003 struct xfs_inodegc *gc;
2004 int items;
Darrick J. Wong40b1de0072021-08-06 11:05:43 -07002005 unsigned int shrinker_hits;
Dave Chinnerab23a772021-08-06 11:05:39 -07002006
2007 trace_xfs_inode_set_need_inactive(ip);
2008 spin_lock(&ip->i_flags_lock);
2009 ip->i_flags |= XFS_NEED_INACTIVE;
2010 spin_unlock(&ip->i_flags_lock);
2011
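	/*
	 * Add the inode to the current CPU's lockless inactivation queue and
	 * sample the queue depth and shrinker state for the heuristics below.
	 */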
2012 gc = get_cpu_ptr(mp->m_inodegc);
2013 llist_add(&ip->i_gclist, &gc->list);
2014 items = READ_ONCE(gc->items);
2015 WRITE_ONCE(gc->items, items + 1);
Darrick J. Wong40b1de0072021-08-06 11:05:43 -07002016 shrinker_hits = READ_ONCE(gc->shrinker_hits);
Dave Chinnerab23a772021-08-06 11:05:39 -07002017 put_cpu_ptr(gc);
2018
2019 if (!xfs_is_inodegc_enabled(mp))
2020 return;
2021
2022 if (xfs_inodegc_want_queue_work(ip, items)) {
2023 trace_xfs_inodegc_queue(mp, __return_address);
2024 queue_work(mp->m_inodegc_wq, &gc->work);
2025 }
2026
Darrick J. Wong40b1de0072021-08-06 11:05:43 -07002027 if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
Dave Chinnerab23a772021-08-06 11:05:39 -07002028 trace_xfs_inodegc_throttle(mp, __return_address);
2029 flush_work(&gc->work);
2030 }
2031}
2032
2033/*
2034 * Fold the dead CPU inodegc queue into the current CPUs queue.
2035 */
2036void
2037xfs_inodegc_cpu_dead(
2038 struct xfs_mount *mp,
2039 unsigned int dead_cpu)
2040{
2041 struct xfs_inodegc *dead_gc, *gc;
2042 struct llist_node *first, *last;
2043 unsigned int count = 0;
2044
2045 dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
2046 cancel_work_sync(&dead_gc->work);
2047
2048 if (llist_empty(&dead_gc->list))
2049 return;
2050
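	/*
	 * Walk the dead CPU's list to find its tail and count its entries,
	 * then detach it so the whole chain can be spliced onto this CPU's
	 * queue in one go.
	 */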
2051 first = dead_gc->list.first;
2052 last = first;
2053 while (last->next) {
2054 last = last->next;
2055 count++;
2056 }
2057 dead_gc->list.first = NULL;
2058 dead_gc->items = 0;
2059
2060 /* Add pending work to current CPU */
2061 gc = get_cpu_ptr(mp->m_inodegc);
2062 llist_add_batch(first, last, &gc->list);
2063 count += READ_ONCE(gc->items);
2064 WRITE_ONCE(gc->items, count);
2065 put_cpu_ptr(gc);
2066
2067 if (xfs_is_inodegc_enabled(mp)) {
2068 trace_xfs_inodegc_queue(mp, __return_address);
2069 queue_work(mp->m_inodegc_wq, &gc->work);
2070 }
2071}
2072
2073/*
2074 * We set the inode flag atomically with the radix tree tag. Once we get tag
2075 * lookups on the radix tree, this inode flag can go away.
2076 *
2077 * We always use background reclaim here because even if the inode is clean, it
2078 * still may be under IO and hence we have to wait for IO completion to occur
2079 * before we can reclaim the inode. The background reclaim path handles this
2080 * more efficiently than we can here, so simply let background reclaim tear down
2081 * all inodes.
2082 */
2083void
2084xfs_inode_mark_reclaimable(
2085 struct xfs_inode *ip)
2086{
2087 struct xfs_mount *mp = ip->i_mount;
2088 bool need_inactive;
2089
2090 XFS_STATS_INC(mp, vn_reclaim);
2091
2092 /*
2093 * We should never get here with any of the reclaim flags already set.
2094 */
2095 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));
2096
2097 need_inactive = xfs_inode_needs_inactive(ip);
2098 if (need_inactive) {
2099 xfs_inodegc_queue(ip);
2100 return;
2101 }
2102
2103 /* Going straight to reclaim, so drop the dquots. */
2104 xfs_qm_dqdetach(ip);
2105 xfs_inodegc_set_reclaimable(ip);
2106}
Darrick J. Wong40b1de0072021-08-06 11:05:43 -07002107
2108/*
2109 * Register a phony shrinker so that we can run background inodegc sooner when
2110 * there's memory pressure. Inactivation does not itself free any memory but
2111 * it does make inodes reclaimable, which eventually frees memory.
2112 *
2113 * The count function, seek value, and batch value are crafted to trigger the
2114 * scan function during the second round of scanning. Hopefully this means
2115 * that we reclaimed enough memory that initiating metadata transactions won't
2116 * make things worse.
2117 */
2118#define XFS_INODEGC_SHRINKER_COUNT (1UL << DEF_PRIORITY)
2119#define XFS_INODEGC_SHRINKER_BATCH ((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
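
/*
 * For concreteness, with the usual DEF_PRIORITY of 12 the phony count comes
 * to 4096 objects and the batch size to 2049, so (as the comment above says)
 * the first shrinker pass defers its work and the scan callback only runs on
 * a later pass.  The exact pass arithmetic depends on the mm shrinker
 * implementation, so treat these numbers as a rough guide.
 */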
2120
2121static unsigned long
2122xfs_inodegc_shrinker_count(
2123 struct shrinker *shrink,
2124 struct shrink_control *sc)
2125{
2126 struct xfs_mount *mp = container_of(shrink, struct xfs_mount,
2127 m_inodegc_shrinker);
2128 struct xfs_inodegc *gc;
2129 int cpu;
2130
2131 if (!xfs_is_inodegc_enabled(mp))
2132 return 0;
2133
2134 for_each_online_cpu(cpu) {
2135 gc = per_cpu_ptr(mp->m_inodegc, cpu);
2136 if (!llist_empty(&gc->list))
2137 return XFS_INODEGC_SHRINKER_COUNT;
2138 }
2139
2140 return 0;
2141}
2142
2143static unsigned long
2144xfs_inodegc_shrinker_scan(
2145 struct shrinker *shrink,
2146 struct shrink_control *sc)
2147{
2148 struct xfs_mount *mp = container_of(shrink, struct xfs_mount,
2149 m_inodegc_shrinker);
2150 struct xfs_inodegc *gc;
2151 int cpu;
2152 bool no_items = true;
2153
2154 if (!xfs_is_inodegc_enabled(mp))
2155 return SHRINK_STOP;
2156
2157 trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
2158
2159 for_each_online_cpu(cpu) {
2160 gc = per_cpu_ptr(mp->m_inodegc, cpu);
2161 if (!llist_empty(&gc->list)) {
2162 unsigned int h = READ_ONCE(gc->shrinker_hits);
2163
2164 WRITE_ONCE(gc->shrinker_hits, h + 1);
2165 queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
2166 no_items = false;
2167 }
2168 }
2169
2170 /*
2171 * If there are no inodes to inactivate, we don't want the shrinker
2172 * to think there's deferred work to call us back about.
2173 */
2174 if (no_items)
2175 return LONG_MAX;
2176
2177 return SHRINK_STOP;
2178}
2179
2180/* Register a shrinker so we can accelerate inodegc and throttle queuing. */
2181int
2182xfs_inodegc_register_shrinker(
2183 struct xfs_mount *mp)
2184{
2185 struct shrinker *shrink = &mp->m_inodegc_shrinker;
2186
2187 shrink->count_objects = xfs_inodegc_shrinker_count;
2188 shrink->scan_objects = xfs_inodegc_shrinker_scan;
2189 shrink->seeks = 0;
2190 shrink->flags = SHRINKER_NONSLAB;
2191 shrink->batch = XFS_INODEGC_SHRINKER_BATCH;
2192
2193 return register_shrinker(shrink);
2194}