// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iwalk.h"
#include "xfs_quota.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);

STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers run either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

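			/*
			 * Advance the lookup cursor past this dquot's id so
			 * the next gang lookup resumes where this batch
			 * left off.
			 */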
			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted.  */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

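	/*
	 * If the callback deferred any dquots with -EAGAIN, back off briefly
	 * and rescan the tree from the start to give them another pass.
	 */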
	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}


/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	int			error = -EAGAIN;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0)
		goto out_unlock;

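	/*
	 * Mark the dquot as freeing so concurrent cache lookups know it is
	 * on its way out and back off until it has left the radix tree.
	 */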
	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quota off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		} else if (error == -EAGAIN) {
			goto out_unlock;
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(mp, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;

out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that the root inode, et al. might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			xfs_irele(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			xfs_irele(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			xfs_irele(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}

STATIC int
xfs_qm_dqattach_one(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	bool			doalloc,
	struct xfs_dquot	**IO_idqpp)
{
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * the dquot and returns it locked.  This can return ENOENT if the
	 * dquot didn't exist on disk and we didn't ask it to allocate; ESRCH
	 * if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

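/*
 * Decide whether an inode needs dquots attached: quotas must be both
 * running and on, at least one applicable dquot must still be missing,
 * and the quota inodes themselves never get dquots attached.
 */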
static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If @doalloc is true, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	bool		doalloc)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, i_uid_read(VFS_I(ip)),
				XFS_DQ_USER, doalloc, &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, i_gid_read(VFS_I(ip)),
				XFS_DQ_GROUP, doalloc, &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
				doalloc, &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}

int
xfs_qm_dqattach(
	struct xfs_inode	*ip)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, false);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}

struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error)
			goto out_unlock_dirty;

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

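	/*
	 * Both failure paths count as a reclaim miss: a busy dquot stays on
	 * the LRU and is skipped this pass, while a dirty dquot that we just
	 * queued for flushing makes the LRU walk restart so the IO has a
	 * chance to complete before we see it again.
	 */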
out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}

static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

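	/*
	 * Reclaiming dquots can issue metadata IO and takes locks, so only
	 * proceed for allocation contexts that allow both direct reclaim
	 * and recursion into the filesystem.
	 */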
	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

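	/*
	 * The isolate walk moved clean, unreferenced dquots onto the dispose
	 * list; now that their backing buffers have been submitted, tear the
	 * dquots down and free them.
	 */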
	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}

static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_shrink_count(&qi->qi_lru, sc);
}

STATIC void
xfs_qm_set_defquota(
	struct xfs_mount	*mp,
	uint			type,
	struct xfs_quotainfo	*qinf)
{
	struct xfs_dquot	*dqp;
	struct xfs_def_quota	*defq;
	struct xfs_disk_dquot	*ddqp;
	int			error;

	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	ddqp = &dqp->q_core;
	defq = xfs_get_defquota(dqp, qinf);

	/*
	 * Timers and warnings have already been set, so let's just set the
	 * default limits for this quota type.
	 */
	defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
	defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
	defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
	defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
	defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
	defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
	xfs_qm_dqdestroy(dqp);
}

/* Initialize quota time limits from the root dquot. */
static void
xfs_qm_init_timelimits(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qinf)
{
	struct xfs_disk_dquot	*ddqp;
	struct xfs_dquot	*dqp;
	uint			type;
	int			error;

	qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
	qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
	qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
	qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
	qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
	qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 *
	 * Timers and warnings are globally set by the first timer found in
	 * user/group/proj quota types, otherwise a default value is used.
	 * This should be split into different fields per quota type.
	 */
	if (XFS_IS_UQUOTA_RUNNING(mp))
		type = XFS_DQ_USER;
	else if (XFS_IS_GQUOTA_RUNNING(mp))
		type = XFS_DQ_GROUP;
	else
		type = XFS_DQ_PROJ;
	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	ddqp = &dqp->q_core;
	/*
	 * The warnings and timers set the grace period given to
	 * a user or group before they can no longer perform any
	 * more writes. If it is zero, a default is used.
	 */
	if (ddqp->d_btimer)
		qinf->qi_btimelimit = be32_to_cpu(ddqp->d_btimer);
	if (ddqp->d_itimer)
		qinf->qi_itimelimit = be32_to_cpu(ddqp->d_itimer);
	if (ddqp->d_rtbtimer)
		qinf->qi_rtbtimelimit = be32_to_cpu(ddqp->d_rtbtimer);
	if (ddqp->d_bwarns)
		qinf->qi_bwarnlimit = be16_to_cpu(ddqp->d_bwarns);
	if (ddqp->d_iwarns)
		qinf->qi_iwarnlimit = be16_to_cpu(ddqp->d_iwarns);
	if (ddqp->d_rtbwarns)
		qinf->qi_rtbwarnlimit = be16_to_cpu(ddqp->d_rtbwarns);

	xfs_qm_dqdestroy(dqp);
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qinf;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if the quota inodes are set up, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	xfs_qm_init_timelimits(mp, qinf);

	if (XFS_IS_UQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
	if (XFS_IS_GQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
	if (XFS_IS_PQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;

	error = register_shrinker(&qinf->qi_shrinker);
	if (error)
		goto out_free_inos;

	return 0;

out_free_inos:
	mutex_destroy(&qinf->qi_quotaofflock);
	mutex_destroy(&qinf->qi_tree_lock);
	xfs_qm_destroy_quotainos(qinf);
out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees the quotainfo.
 */
void
xfs_qm_destroy_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);
	xfs_qm_destroy_quotainos(qi);
	mutex_destroy(&qi->qi_tree_lock);
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	bool		need_alloc = true;

	*ip = NULL;
	/*
	 * With a superblock that doesn't have a separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_pquotino != NULLFSINO))
				return -EFSCORRUPTED;
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_gquotino != NULLFSINO))
				return -EFSCORRUPTED;
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

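	/*
	 * Only reserve inode-creation space when a new quota inode is
	 * actually being allocated; adopting an existing group/project
	 * inode above needs no block reservation.
	 */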
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
			0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc)
		xfs_finish_inode_setup(*ip);
	return error;
}


STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	struct xfs_dqblk	*dqb;
	int			j;
	xfs_failaddr_t		fa;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(xfs_dqblk_t);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in
		 * xfs_dquot_verify.
		 */
		fa = xfs_dqblk_verify(mp, &dqb[j], id + j, type);
		if (fa)
			xfs_dqblk_repair(mp, &dqb[j], id + j, type);

		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_flags = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;

		/*
		 * dquot id 0 stores the default grace period and the maximum
		 * warning limit that were set by the administrator, so we
		 * should not reset them.
		 */
		if (ddq->d_id != 0) {
			ddq->d_btimer = 0;
			ddq->d_itimer = 0;
			ddq->d_rtbtimer = 0;
			ddq->d_bwarns = 0;
			ddq->d_iwarns = 0;
			ddq->d_rtbwarns = 0;
		}

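		/*
		 * The dquots were zeroed in place, so recompute each dqblk
		 * CRC on v5 filesystems before the buffer goes back to disk.
		 */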
| 894 | if (xfs_sb_version_hascrc(&mp->m_sb)) { |
| 895 | xfs_update_cksum((char *)&dqb[j], |
| 896 | sizeof(struct xfs_dqblk), |
| 897 | XFS_DQUOT_CRC_OFF); |
| 898 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 899 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 900 | } |
| 901 | |
| 902 | STATIC int |
Darrick J. Wong | 28b9060 | 2018-05-04 15:31:20 -0700 | [diff] [blame] | 903 | xfs_qm_reset_dqcounts_all( |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 904 | struct xfs_mount *mp, |
| 905 | xfs_dqid_t firstid, |
| 906 | xfs_fsblock_t bno, |
| 907 | xfs_filblks_t blkcnt, |
| 908 | uint flags, |
| 909 | struct list_head *buffer_list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 910 | { |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 911 | struct xfs_buf *bp; |
| 912 | int error; |
| 913 | int type; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 914 | |
| 915 | ASSERT(blkcnt > 0); |
Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 916 | type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER : |
| 917 | (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 918 | error = 0; |
| 919 | |
| 920 | /* |
| 921 | * Blkcnt arg can be a very big number, and might even be |
| 922 | * larger than the log itself. So, we have to break it up into |
| 923 | * manageable-sized transactions. |
| 924 | * Note that we don't start a permanent transaction here; we might |
| 925 | * not be able to get a log reservation for the whole thing up front, |
| 926 | * and we don't really care to either, because we just discard |
| 927 | * everything if we were to crash in the middle of this loop. |
| 928 | */ |
| 929 | while (blkcnt--) { |
| 930 | error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, |
| 931 | XFS_FSB_TO_DADDR(mp, bno), |
Dave Chinner | c631919 | 2012-11-14 17:50:13 +1100 | [diff] [blame] | 932 | mp->m_quotainfo->qi_dqchunklen, 0, &bp, |
Dave Chinner | 1813dd6 | 2012-11-14 17:54:40 +1100 | [diff] [blame] | 933 | &xfs_dquot_buf_ops); |
Dave Chinner | 6fcdc59 | 2013-06-03 15:28:46 +1000 | [diff] [blame] | 934 | |
| 935 | /* |
| 936 | * CRC and validation errors will return an EFSCORRUPTED here. If |
| 937 | * this occurs, re-read without CRC validation so that we can |
| 938 | * repair the damage via xfs_qm_reset_dqcounts(). This process |
| 939 | * will leave a trace in the log indicating corruption has |
| 940 | * been detected. |
| 941 | */ |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 942 | if (error == -EFSCORRUPTED) { |
Dave Chinner | 6fcdc59 | 2013-06-03 15:28:46 +1000 | [diff] [blame] | 943 | error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, |
| 944 | XFS_FSB_TO_DADDR(mp, bno), |
| 945 | mp->m_quotainfo->qi_dqchunklen, 0, &bp, |
| 946 | NULL); |
| 947 | } |
| 948 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 949 | if (error) |
| 950 | break; |
| 951 | |
Dave Chinner | 5fd364f | 2014-08-04 12:43:26 +1000 | [diff] [blame] | 952 | /* |
| 953 | * A corrupt buffer might not have a verifier attached, so |
| 954 | * make sure we have the correct one attached before writeback |
| 955 | * occurs. |
| 956 | */ |
| 957 | bp->b_ops = &xfs_dquot_buf_ops; |
David Chinner | 5b13973 | 2008-04-10 12:20:10 +1000 | [diff] [blame] | 958 | xfs_qm_reset_dqcounts(mp, bp, firstid, type); |
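		/*
		 * Queue the buffer for deferred write instead of writing it
		 * synchronously; quotacheck submits the whole delwri list in
		 * one batch once every chunk has been reset.
		 */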
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 959 | xfs_buf_delwri_queue(bp, buffer_list); |
Christoph Hellwig | 61551f1 | 2011-08-23 08:28:06 +0000 | [diff] [blame] | 960 | xfs_buf_relse(bp); |
Dave Chinner | 6fcdc59 | 2013-06-03 15:28:46 +1000 | [diff] [blame] | 961 | |
| 962 | /* go to the next block. */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 963 | bno++; |
Christoph Hellwig | 8a7b8a8 | 2010-04-20 17:01:30 +1000 | [diff] [blame] | 964 | firstid += mp->m_quotainfo->qi_dqperchunk; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 965 | } |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 966 | |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 967 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 968 | } |
| 969 | |
| 970 | /* |
Darrick J. Wong | 28b9060 | 2018-05-04 15:31:20 -0700 | [diff] [blame] | 971 | * Iterate over all allocated dquot blocks in this quota inode, zeroing all |
| 972 | * counters for every chunk of dquots that we find. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 973 | */ |
| 974 | STATIC int |
Darrick J. Wong | 28b9060 | 2018-05-04 15:31:20 -0700 | [diff] [blame] | 975 | xfs_qm_reset_dqcounts_buf( |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 976 | struct xfs_mount *mp, |
| 977 | struct xfs_inode *qip, |
| 978 | uint flags, |
| 979 | struct list_head *buffer_list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 980 | { |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 981 | struct xfs_bmbt_irec *map; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 982 | int i, nmaps; /* number of map entries */ |
| 983 | int error; /* return value */ |
| 984 | xfs_fileoff_t lblkno; |
| 985 | xfs_filblks_t maxlblkcnt; |
| 986 | xfs_dqid_t firstid; |
| 987 | xfs_fsblock_t rablkno; |
| 988 | xfs_filblks_t rablkcnt; |
| 989 | |
| 990 | error = 0; |
| 991 | /* |
Nathan Scott | c41564b | 2006-03-29 08:55:14 +1000 | [diff] [blame] | 992 | * This looks racy, but we can't keep an inode lock across a |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 993 | * trans_reserve. However, this gets called during quotacheck, and that |
| 994 | * happens only at mount time, which is single-threaded. |
| 995 | */ |
| 996 | if (qip->i_d.di_nblocks == 0) |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 997 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 998 | |
Tetsuo Handa | 707e0dd | 2019-08-26 12:06:22 -0700 | [diff] [blame] | 999 | map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1000 | |
| 1001 | lblkno = 0; |
Dave Chinner | 3297238 | 2012-06-08 15:44:54 +1000 | [diff] [blame] | 1002 | maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1003 | do { |
Christoph Hellwig | da51d32 | 2013-12-06 12:30:14 -0800 | [diff] [blame] | 1004 | uint lock_mode; |
| 1005 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1006 | nmaps = XFS_DQITER_MAP_SIZE; |
| 1007 | /* |
| 1008 | * We aren't changing the inode itself, just some |
| 1009 | * of its data. No new blocks are added here, and |
| 1010 | * the inode is never added to the transaction. |
| 1011 | */ |
Christoph Hellwig | da51d32 | 2013-12-06 12:30:14 -0800 | [diff] [blame] | 1012 | lock_mode = xfs_ilock_data_map_shared(qip); |
Dave Chinner | 5c8ed20 | 2011-09-18 20:40:45 +0000 | [diff] [blame] | 1013 | error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno, |
| 1014 | map, &nmaps, 0); |
Christoph Hellwig | da51d32 | 2013-12-06 12:30:14 -0800 | [diff] [blame] | 1015 | xfs_iunlock(qip, lock_mode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1016 | if (error) |
| 1017 | break; |
| 1018 | |
| 1019 | ASSERT(nmaps <= XFS_DQITER_MAP_SIZE); |
| 1020 | for (i = 0; i < nmaps; i++) { |
| 1021 | ASSERT(map[i].br_startblock != DELAYSTARTBLOCK); |
| 1022 | ASSERT(map[i].br_blockcount); |
| 1023 | |
| 1025 | lblkno += map[i].br_blockcount; |
| 1026 | |
| 1027 | if (map[i].br_startblock == HOLESTARTBLOCK) |
| 1028 | continue; |
| 1029 | |
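			/*
			 * Dquot IDs are a linear function of the file offset:
			 * the chunk at br_startoff N holds IDs starting at
			 * N * qi_dqperchunk. For example, if qi_dqperchunk
			 * were 30, the chunk at offset 2 would cover IDs
			 * 60..89.
			 */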
| 1030 | firstid = (xfs_dqid_t) map[i].br_startoff * |
Christoph Hellwig | 8a7b8a8 | 2010-04-20 17:01:30 +1000 | [diff] [blame] | 1031 | mp->m_quotainfo->qi_dqperchunk; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1032 | /* |
| 1033 | * Do a read-ahead on the next extent. |
| 1034 | */ |
| 1035 | if ((i+1 < nmaps) && |
| 1036 | (map[i+1].br_startblock != HOLESTARTBLOCK)) { |
| 1037 | rablkcnt = map[i+1].br_blockcount; |
| 1038 | rablkno = map[i+1].br_startblock; |
| 1039 | while (rablkcnt--) { |
Christoph Hellwig | 1a1a3e9 | 2010-10-06 18:41:18 +0000 | [diff] [blame] | 1040 | xfs_buf_readahead(mp->m_ddev_targp, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1041 | XFS_FSB_TO_DADDR(mp, rablkno), |
Dave Chinner | c3f8fc7 | 2012-11-12 22:54:01 +1100 | [diff] [blame] | 1042 | mp->m_quotainfo->qi_dqchunklen, |
Dave Chinner | 5fd364f | 2014-08-04 12:43:26 +1000 | [diff] [blame] | 1043 | &xfs_dquot_buf_ops); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1044 | rablkno++; |
| 1045 | } |
| 1046 | } |
| 1047 | /* |
| 1048 | * Iterate through all the blocks in the extent and |
| 1049 | * reset the counters of all the dquots inside them. |
| 1050 | */ |
Darrick J. Wong | 28b9060 | 2018-05-04 15:31:20 -0700 | [diff] [blame] | 1051 | error = xfs_qm_reset_dqcounts_all(mp, firstid, |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1052 | map[i].br_startblock, |
| 1053 | map[i].br_blockcount, |
| 1054 | flags, buffer_list); |
| 1055 | if (error) |
| 1056 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1057 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1058 | } while (nmaps > 0); |
| 1059 | |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1060 | out: |
Denys Vlasenko | f0e2d93 | 2008-05-19 16:31:57 +1000 | [diff] [blame] | 1061 | kmem_free(map); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1062 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1063 | } |
| 1064 | |
| 1065 | /* |
| 1066 | * Called by dqusage_adjust in doing a quotacheck. |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1067 | * |
| 1068 | * Given the inode and a dquot id, this updates both the incore dquot as well |
| 1069 | * as the buffer copy. This is so that once the quotacheck is done, we can |
| 1070 | * just log all the buffers, as opposed to logging numerous updates to |
| 1071 | * individual dquots. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1072 | */ |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1073 | STATIC int |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1074 | xfs_qm_quotacheck_dqadjust( |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1075 | struct xfs_inode *ip, |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1076 | uint type, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1077 | xfs_qcnt_t nblks, |
| 1078 | xfs_qcnt_t rtblks) |
| 1079 | { |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1080 | struct xfs_mount *mp = ip->i_mount; |
| 1081 | struct xfs_dquot *dqp; |
Darrick J. Wong | 0fcef12 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 1082 | xfs_dqid_t id; |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1083 | int error; |
| 1084 | |
Darrick J. Wong | 0fcef12 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 1085 | id = xfs_qm_id_for_quotatype(ip, type); |
Darrick J. Wong | 30ab2dc | 2018-05-04 15:30:24 -0700 | [diff] [blame] | 1086 | error = xfs_qm_dqget(mp, id, type, true, &dqp); |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1087 | if (error) { |
| 1088 | /* |
| 1089 | * Shouldn't be able to turn off quotas here. |
| 1090 | */ |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1091 | ASSERT(error != -ESRCH); |
| 1092 | ASSERT(error != -ENOENT); |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1093 | return error; |
| 1094 | } |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 1095 | |
| 1096 | trace_xfs_dqadjust(dqp); |
| 1097 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1098 | /* |
| 1099 | * Adjust the inode count and the block count to reflect this inode's |
| 1100 | * resource usage. |
| 1101 | */ |
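	/*
	 * Note that both the on-disk image (q_core) and the incore
	 * reservation counters (q_res_*) are bumped, so later reservations
	 * against this dquot see the usage quotacheck has accumulated.
	 */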
Marcin Slusarz | 413d57c | 2008-02-13 15:03:29 -0800 | [diff] [blame] | 1102 | be64_add_cpu(&dqp->q_core.d_icount, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1103 | dqp->q_res_icount++; |
| 1104 | if (nblks) { |
Marcin Slusarz | 413d57c | 2008-02-13 15:03:29 -0800 | [diff] [blame] | 1105 | be64_add_cpu(&dqp->q_core.d_bcount, nblks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1106 | dqp->q_res_bcount += nblks; |
| 1107 | } |
| 1108 | if (rtblks) { |
Marcin Slusarz | 413d57c | 2008-02-13 15:03:29 -0800 | [diff] [blame] | 1109 | be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1110 | dqp->q_res_rtbcount += rtblks; |
| 1111 | } |
| 1112 | |
| 1113 | /* |
| 1114 | * Set default limits, adjust timers (since we changed usages) |
Christoph Hellwig | 191f848 | 2010-04-20 17:01:53 +1000 | [diff] [blame] | 1115 | * |
| 1116 | * There are no timers for the default values set in the root dquot. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1117 | */ |
Christoph Hellwig | 191f848 | 2010-04-20 17:01:53 +1000 | [diff] [blame] | 1118 | if (dqp->q_core.d_id) { |
Brian Foster | 4b6eae2e | 2013-03-18 10:51:45 -0400 | [diff] [blame] | 1119 | xfs_qm_adjust_dqlimits(mp, dqp); |
Eric Sandeen | 3dbb9aa | 2020-05-21 13:07:00 -0700 | [diff] [blame^] | 1120 | xfs_qm_adjust_dqtimers(mp, dqp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1121 | } |
| 1122 | |
| 1123 | dqp->dq_flags |= XFS_DQ_DIRTY; |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1124 | xfs_qm_dqput(dqp); |
| 1125 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1126 | } |
| 1127 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1128 | /* |
| 1129 | * Callback routine supplied to xfs_iwalk_threaded(). Given an inumber, find its |
| 1130 | * dquots and update them to account for resources taken by that inode. |
| 1131 | */ |
| 1132 | /* ARGSUSED */ |
| 1133 | STATIC int |
| 1134 | xfs_qm_dqusage_adjust( |
Darrick J. Wong | ebd126a | 2019-07-02 09:39:39 -0700 | [diff] [blame] | 1135 | struct xfs_mount *mp, |
| 1136 | struct xfs_trans *tp, |
| 1137 | xfs_ino_t ino, |
| 1138 | void *data) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1139 | { |
Darrick J. Wong | ebd126a | 2019-07-02 09:39:39 -0700 | [diff] [blame] | 1140 | struct xfs_inode *ip; |
| 1141 | xfs_qcnt_t nblks; |
| 1142 | xfs_filblks_t rtblks = 0; /* total rt blks */ |
| 1143 | int error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1144 | |
| 1145 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
| 1146 | |
| 1147 | /* |
| 1148 | * rootino must have its resources accounted for; not so with the quota |
| 1149 | * inodes. |
| 1150 | */ |
Darrick J. Wong | ebd126a | 2019-07-02 09:39:39 -0700 | [diff] [blame] | 1151 | if (xfs_is_quota_inode(&mp->m_sb, ino)) |
| 1152 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1153 | |
| 1154 | /* |
Darrick J. Wong | 0fcef12 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 1155 | * We don't _need_ to take the ilock EXCL here because quotacheck runs |
| 1156 | * at mount time and therefore nobody will be racing chown/chproj. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1157 | */ |
Darrick J. Wong | ebd126a | 2019-07-02 09:39:39 -0700 | [diff] [blame] | 1158 | error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip); |
| 1159 | if (error == -EINVAL || error == -ENOENT) |
| 1160 | return 0; |
| 1161 | if (error) |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1162 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1163 | |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1164 | ASSERT(ip->i_delayed_blks == 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1165 | |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1166 | if (XFS_IS_REALTIME_INODE(ip)) { |
Christoph Hellwig | 8bfadd8 | 2017-08-29 15:44:14 -0700 | [diff] [blame] | 1167 | struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); |
| 1168 | |
| 1169 | if (!(ifp->if_flags & XFS_IFEXTENTS)) { |
Darrick J. Wong | ebd126a | 2019-07-02 09:39:39 -0700 | [diff] [blame] | 1170 | error = xfs_iread_extents(tp, ip, XFS_DATA_FORK); |
Christoph Hellwig | 8bfadd8 | 2017-08-29 15:44:14 -0700 | [diff] [blame] | 1171 | if (error) |
| 1172 | goto error0; |
| 1173 | } |
| 1174 | |
| 1175 | xfs_bmap_count_leaves(ifp, &rtblks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1176 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1177 | |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1178 | nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1179 | |
| 1180 | /* |
| 1181 | * Add the (disk blocks and inode) resources occupied by this |
| 1182 | * inode to its dquots. We do this adjustment in the incore dquot, |
| 1183 | * and also copy the changes to its buffer. |
| 1184 | * We don't care about putting these changes in a transaction |
| 1185 | * envelope because if we crash in the middle of a 'quotacheck' |
| 1186 | * we have to start from the beginning anyway. |
| 1187 | * Once we're done, we'll log all the dquot bufs. |
| 1188 | * |
Nathan Scott | c41564b | 2006-03-29 08:55:14 +1000 | [diff] [blame] | 1189 | * The *QUOTA_ON checks below may look pretty racy, but quotachecks |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1190 | * and quotaoffs don't race. (Quotachecks happen at mount time only). |
| 1191 | */ |
| 1192 | if (XFS_IS_UQUOTA_ON(mp)) { |
Darrick J. Wong | 0fcef12 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 1193 | error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_USER, nblks, |
| 1194 | rtblks); |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1195 | if (error) |
| 1196 | goto error0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1197 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1198 | |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1199 | if (XFS_IS_GQUOTA_ON(mp)) { |
Darrick J. Wong | 0fcef12 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 1200 | error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_GROUP, nblks, |
| 1201 | rtblks); |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1202 | if (error) |
| 1203 | goto error0; |
| 1204 | } |
| 1205 | |
| 1206 | if (XFS_IS_PQUOTA_ON(mp)) { |
Darrick J. Wong | 0fcef12 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 1207 | error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_PROJ, nblks, |
| 1208 | rtblks); |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1209 | if (error) |
| 1210 | goto error0; |
| 1211 | } |
| 1212 | |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1213 | error0: |
Darrick J. Wong | 44a8736 | 2018-07-25 12:52:32 -0700 | [diff] [blame] | 1214 | xfs_irele(ip); |
Christoph Hellwig | 52fda11 | 2010-09-06 01:44:22 +0000 | [diff] [blame] | 1215 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1216 | } |
| 1217 | |
Christoph Hellwig | b84a3a9 | 2012-03-14 11:53:34 -0500 | [diff] [blame] | 1218 | STATIC int |
| 1219 | xfs_qm_flush_one( |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1220 | struct xfs_dquot *dqp, |
| 1221 | void *data) |
Christoph Hellwig | b84a3a9 | 2012-03-14 11:53:34 -0500 | [diff] [blame] | 1222 | { |
Brian Foster | 7912e7f | 2017-06-14 21:21:45 -0700 | [diff] [blame] | 1223 | struct xfs_mount *mp = dqp->q_mount; |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1224 | struct list_head *buffer_list = data; |
Christoph Hellwig | fe7257f | 2012-04-23 15:58:37 +1000 | [diff] [blame] | 1225 | struct xfs_buf *bp = NULL; |
Christoph Hellwig | b84a3a9 | 2012-03-14 11:53:34 -0500 | [diff] [blame] | 1226 | int error = 0; |
| 1227 | |
| 1228 | xfs_dqlock(dqp); |
| 1229 | if (dqp->dq_flags & XFS_DQ_FREEING) |
| 1230 | goto out_unlock; |
| 1231 | if (!XFS_DQ_IS_DIRTY(dqp)) |
| 1232 | goto out_unlock; |
| 1233 | |
Brian Foster | 7912e7f | 2017-06-14 21:21:45 -0700 | [diff] [blame] | 1234 | /* |
| 1235 | * The only way the dquot is already flush locked by the time quotacheck |
| 1236 | * gets here is if reclaim flushed it before the dqadjust walk dirtied |
| 1237 | * it for the final time. Quotacheck collects all dquot bufs in the |
| 1238 | * local delwri queue before dquots are dirtied, so reclaim can't have |
| 1239 | * possibly queued it for I/O. The only way out is to push the buffer to |
| 1240 | * cycle the flush lock. |
| 1241 | */ |
| 1242 | if (!xfs_dqflock_nowait(dqp)) { |
| 1243 | /* buf is pinned in-core by delwri list */ |
Dave Chinner | 8925a3d | 2018-04-18 08:25:20 -0700 | [diff] [blame] | 1244 | bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno, |
| 1245 | mp->m_quotainfo->qi_dqchunklen, 0); |
Brian Foster | 7912e7f | 2017-06-14 21:21:45 -0700 | [diff] [blame] | 1246 | if (!bp) { |
| 1247 | error = -EINVAL; |
| 1248 | goto out_unlock; |
| 1249 | } |
| 1250 | xfs_buf_unlock(bp); |
| 1251 | |
| 1252 | xfs_buf_delwri_pushbuf(bp, buffer_list); |
| 1253 | xfs_buf_rele(bp); |
| 1254 | |
| 1255 | error = -EAGAIN; |
| 1256 | goto out_unlock; |
| 1257 | } |
| 1258 | |
Christoph Hellwig | fe7257f | 2012-04-23 15:58:37 +1000 | [diff] [blame] | 1259 | error = xfs_qm_dqflush(dqp, &bp); |
| 1260 | if (error) |
| 1261 | goto out_unlock; |
Christoph Hellwig | b84a3a9 | 2012-03-14 11:53:34 -0500 | [diff] [blame] | 1262 | |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1263 | xfs_buf_delwri_queue(bp, buffer_list); |
Christoph Hellwig | fe7257f | 2012-04-23 15:58:37 +1000 | [diff] [blame] | 1264 | xfs_buf_relse(bp); |
Christoph Hellwig | b84a3a9 | 2012-03-14 11:53:34 -0500 | [diff] [blame] | 1265 | out_unlock: |
| 1266 | xfs_dqunlock(dqp); |
| 1267 | return error; |
| 1268 | } |
| 1269 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1270 | /* |
| 1271 | * Walk through all the filesystem inodes and construct a consistent view |
| 1272 | * of the disk quota world. If the quotacheck fails, disable quotas. |
| 1273 | */ |
Jie Liu | eb866bb | 2014-07-24 20:49:57 +1000 | [diff] [blame] | 1274 | STATIC int |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1275 | xfs_qm_quotacheck( |
| 1276 | xfs_mount_t *mp) |
| 1277 | { |
Darrick J. Wong | ebd126a | 2019-07-02 09:39:39 -0700 | [diff] [blame] | 1278 | int error, error2; |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1279 | uint flags; |
| 1280 | LIST_HEAD (buffer_list); |
| 1281 | struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip; |
| 1282 | struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip; |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1283 | struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1284 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1285 | flags = 0; |
| 1286 | |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1287 | ASSERT(uip || gip || pip); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1288 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
| 1289 | |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1290 | xfs_notice(mp, "Quotacheck needed: Please wait."); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1291 | |
| 1292 | /* |
Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 1293 | * First we go through all the dquots on disk, USR and GRP/PRJ, and reset |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1294 | * their counters to zero. We need a clean slate. |
| 1295 | * We don't log our changes till later. |
| 1296 | */ |
Christoph Hellwig | 8a7b8a8 | 2010-04-20 17:01:30 +1000 | [diff] [blame] | 1297 | if (uip) { |
Darrick J. Wong | 28b9060 | 2018-05-04 15:31:20 -0700 | [diff] [blame] | 1298 | error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_QMOPT_UQUOTA, |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1299 | &buffer_list); |
Christoph Hellwig | 8a7b8a8 | 2010-04-20 17:01:30 +1000 | [diff] [blame] | 1300 | if (error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1301 | goto error_return; |
| 1302 | flags |= XFS_UQUOTA_CHKD; |
| 1303 | } |
| 1304 | |
Christoph Hellwig | 8a7b8a8 | 2010-04-20 17:01:30 +1000 | [diff] [blame] | 1305 | if (gip) { |
Darrick J. Wong | 28b9060 | 2018-05-04 15:31:20 -0700 | [diff] [blame] | 1306 | error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_QMOPT_GQUOTA, |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1307 | &buffer_list); |
Christoph Hellwig | 8a7b8a8 | 2010-04-20 17:01:30 +1000 | [diff] [blame] | 1308 | if (error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1309 | goto error_return; |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1310 | flags |= XFS_GQUOTA_CHKD; |
| 1311 | } |
| 1312 | |
| 1313 | if (pip) { |
Darrick J. Wong | 28b9060 | 2018-05-04 15:31:20 -0700 | [diff] [blame] | 1314 | error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_QMOPT_PQUOTA, |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1315 | &buffer_list); |
| 1316 | if (error) |
| 1317 | goto error_return; |
| 1318 | flags |= XFS_PQUOTA_CHKD; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1319 | } |
| 1320 | |
Darrick J. Wong | 13d59a2 | 2019-07-03 20:36:28 -0700 | [diff] [blame] | 1321 | error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true, |
| 1322 | NULL); |
Darrick J. Wong | ebd126a | 2019-07-02 09:39:39 -0700 | [diff] [blame] | 1323 | if (error) |
| 1324 | goto error_return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1325 | |
| 1326 | /* |
Christoph Hellwig | b84a3a9 | 2012-03-14 11:53:34 -0500 | [diff] [blame] | 1327 | * We've made all the changes that we need to make incore. Flush them |
| 1328 | * down to disk buffers if everything was updated successfully. |
David Chinner | 4b8879d | 2008-04-10 12:20:17 +1000 | [diff] [blame] | 1329 | */ |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1330 | if (XFS_IS_UQUOTA_ON(mp)) { |
| 1331 | error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one, |
| 1332 | &buffer_list); |
| 1333 | } |
Christoph Hellwig | b84a3a9 | 2012-03-14 11:53:34 -0500 | [diff] [blame] | 1334 | if (XFS_IS_GQUOTA_ON(mp)) { |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1335 | error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one, |
| 1336 | &buffer_list); |
Christoph Hellwig | b84a3a9 | 2012-03-14 11:53:34 -0500 | [diff] [blame] | 1337 | if (!error) |
| 1338 | error = error2; |
| 1339 | } |
| 1340 | if (XFS_IS_PQUOTA_ON(mp)) { |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1341 | error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one, |
| 1342 | &buffer_list); |
Christoph Hellwig | b84a3a9 | 2012-03-14 11:53:34 -0500 | [diff] [blame] | 1343 | if (!error) |
| 1344 | error = error2; |
| 1345 | } |
David Chinner | 4b8879d | 2008-04-10 12:20:17 +1000 | [diff] [blame] | 1346 | |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1347 | error2 = xfs_buf_delwri_submit(&buffer_list); |
| 1348 | if (!error) |
| 1349 | error = error2; |
| 1350 | |
David Chinner | 4b8879d | 2008-04-10 12:20:17 +1000 | [diff] [blame] | 1351 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1352 | * We can get this error if we couldn't do a dquot allocation inside |
| 1353 | * xfs_qm_dqusage_adjust (via the inode walk). We don't care about the |
| 1354 | * dirty dquots that might be cached; we just want to get rid of them |
| 1355 | * and turn quotaoff. The dquots won't be attached to any of the inodes |
| 1356 | * at this point (because we intentionally didn't in dqget_noattach). |
| 1357 | */ |
| 1358 | if (error) { |
Christoph Hellwig | 8112e9d | 2010-04-20 17:02:29 +1000 | [diff] [blame] | 1359 | xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1360 | goto error_return; |
| 1361 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1362 | |
| 1363 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1364 | * If one type of quotas is off, then it will lose its |
| 1365 | * quotachecked status, since we won't be doing accounting for |
| 1366 | * that type anymore. |
| 1367 | */ |
Chandra Seetharaman | 4177af3 | 2012-01-23 17:31:43 +0000 | [diff] [blame] | 1368 | mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1369 | mp->m_qflags |= flags; |
| 1370 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1371 | error_return: |
Brian Foster | 20e8a06 | 2017-04-21 12:40:44 -0700 | [diff] [blame] | 1372 | xfs_buf_delwri_cancel(&buffer_list); |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1373 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1374 | if (error) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1375 | xfs_warn(mp, |
| 1376 | "Quotacheck: Unsuccessful (Error %d): Disabling quotas.", |
| 1377 | error); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1378 | /* |
| 1379 | * We must turn off quotas. |
| 1380 | */ |
| 1381 | ASSERT(mp->m_quotainfo != NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1382 | xfs_qm_destroy_quotainfo(mp); |
David Chinner | 31d5577 | 2008-04-10 12:20:38 +1000 | [diff] [blame] | 1383 | if (xfs_mount_reset_sbqflags(mp)) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1384 | xfs_warn(mp, |
| 1385 | "Quotacheck: Failed to reset quota flags."); |
David Chinner | 31d5577 | 2008-04-10 12:20:38 +1000 | [diff] [blame] | 1386 | } |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1387 | } else |
| 1388 | xfs_notice(mp, "Quotacheck: Done."); |
Eric Sandeen | d99831f | 2014-06-22 15:03:54 +1000 | [diff] [blame] | 1389 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1390 | } |
| 1391 | |
| 1392 | /* |
Jie Liu | eb866bb | 2014-07-24 20:49:57 +1000 | [diff] [blame] | 1393 | * This is called from xfs_mountfs to start quotas and initialize all |
| 1394 | * necessary data structures like quotainfo. This is also responsible for |
| 1395 | * running a quotacheck as necessary. We are guaranteed that the superblock |
| 1396 | * is consistently read in at this point. |
| 1397 | * |
| 1398 | * If we fail here, the mount will continue with quota turned off. We don't |
| 1399 | * need to indicate success or failure at all. |
| 1400 | */ |
| 1401 | void |
| 1402 | xfs_qm_mount_quotas( |
| 1403 | struct xfs_mount *mp) |
| 1404 | { |
| 1405 | int error = 0; |
| 1406 | uint sbf; |
| 1407 | |
| 1408 | /* |
| 1409 | * If quotas on realtime volumes are not supported, we disable |
| 1410 | * quotas immediately. |
| 1411 | */ |
| 1412 | if (mp->m_sb.sb_rextents) { |
| 1413 | xfs_notice(mp, "Cannot turn on quotas for realtime filesystem"); |
| 1414 | mp->m_qflags = 0; |
| 1415 | goto write_changes; |
| 1416 | } |
| 1417 | |
| 1418 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
| 1419 | |
| 1420 | /* |
| 1421 | * Allocate the quotainfo structure inside the mount struct, create |
| 1422 | * quotainode(s), and change/rev the superblock if necessary. |
| 1423 | */ |
| 1424 | error = xfs_qm_init_quotainfo(mp); |
| 1425 | if (error) { |
| 1426 | /* |
| 1427 | * We must turn off quotas. |
| 1428 | */ |
| 1429 | ASSERT(mp->m_quotainfo == NULL); |
| 1430 | mp->m_qflags = 0; |
| 1431 | goto write_changes; |
| 1432 | } |
| 1433 | /* |
| 1434 | * If any of the quotas are not consistent, do a quotacheck. |
| 1435 | */ |
| 1436 | if (XFS_QM_NEED_QUOTACHECK(mp)) { |
| 1437 | error = xfs_qm_quotacheck(mp); |
| 1438 | if (error) { |
| 1439 | /* Quotacheck failed and disabled quotas. */ |
| 1440 | return; |
| 1441 | } |
| 1442 | } |
| 1443 | /* |
| 1444 | * If one type of quotas is off, then it will lose its |
| 1445 | * quotachecked status, since we won't be doing accounting for |
| 1446 | * that type anymore. |
| 1447 | */ |
| 1448 | if (!XFS_IS_UQUOTA_ON(mp)) |
| 1449 | mp->m_qflags &= ~XFS_UQUOTA_CHKD; |
| 1450 | if (!XFS_IS_GQUOTA_ON(mp)) |
| 1451 | mp->m_qflags &= ~XFS_GQUOTA_CHKD; |
| 1452 | if (!XFS_IS_PQUOTA_ON(mp)) |
| 1453 | mp->m_qflags &= ~XFS_PQUOTA_CHKD; |
| 1454 | |
| 1455 | write_changes: |
| 1456 | /* |
| 1457 | * We actually don't have to acquire the m_sb_lock at all. |
| 1458 | * This can only be called from mount, and that's single-threaded. XXX |
| 1459 | */ |
| 1460 | spin_lock(&mp->m_sb_lock); |
| 1461 | sbf = mp->m_sb.sb_qflags; |
| 1462 | mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL; |
| 1463 | spin_unlock(&mp->m_sb_lock); |
| 1464 | |
| 1465 | if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { |
Dave Chinner | 61e63ec | 2015-01-22 09:10:31 +1100 | [diff] [blame] | 1466 | if (xfs_sync_sb(mp, false)) { |
Jie Liu | eb866bb | 2014-07-24 20:49:57 +1000 | [diff] [blame] | 1467 | /* |
| 1468 | * We could only have been turning quotas off. |
| 1469 | * We aren't in very good shape actually because |
| 1470 | * the incore structures are convinced that quotas are |
| 1471 | * off, but the on-disk superblock doesn't know that! |
| 1472 | */ |
| 1473 | ASSERT(!(XFS_IS_QUOTA_RUNNING(mp))); |
| 1474 | xfs_alert(mp, "%s: Superblock update failed!", |
| 1475 | __func__); |
| 1476 | } |
| 1477 | } |
| 1478 | |
| 1479 | if (error) { |
| 1480 | xfs_warn(mp, "Failed to initialize disk quotas."); |
| 1481 | return; |
| 1482 | } |
| 1483 | } |
| 1484 | |
| 1485 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1486 | * This is called after the superblock has been read in and we're ready to |
| 1487 | * iget the quota inodes. |
| 1488 | */ |
| 1489 | STATIC int |
| 1490 | xfs_qm_init_quotainos( |
| 1491 | xfs_mount_t *mp) |
| 1492 | { |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1493 | struct xfs_inode *uip = NULL; |
| 1494 | struct xfs_inode *gip = NULL; |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1495 | struct xfs_inode *pip = NULL; |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1496 | int error; |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1497 | uint flags = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1498 | |
| 1499 | ASSERT(mp->m_quotainfo); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1500 | |
| 1501 | /* |
| 1502 | * Get the uquota, gquota and pquota inodes |
| 1503 | */ |
Eric Sandeen | 6211870 | 2008-03-06 13:44:28 +1100 | [diff] [blame] | 1504 | if (xfs_sb_version_hasquota(&mp->m_sb)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1505 | if (XFS_IS_UQUOTA_ON(mp) && |
| 1506 | mp->m_sb.sb_uquotino != NULLFSINO) { |
| 1507 | ASSERT(mp->m_sb.sb_uquotino > 0); |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1508 | error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, |
| 1509 | 0, 0, &uip); |
| 1510 | if (error) |
Eric Sandeen | b474c7a | 2014-06-22 15:04:54 +1000 | [diff] [blame] | 1511 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1512 | } |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1513 | if (XFS_IS_GQUOTA_ON(mp) && |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1514 | mp->m_sb.sb_gquotino != NULLFSINO) { |
| 1515 | ASSERT(mp->m_sb.sb_gquotino > 0); |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1516 | error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, |
| 1517 | 0, 0, &gip); |
| 1518 | if (error) |
| 1519 | goto error_rele; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1520 | } |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1521 | if (XFS_IS_PQUOTA_ON(mp) && |
Chandra Seetharaman | d892d58 | 2013-07-19 17:36:02 -0500 | [diff] [blame] | 1522 | mp->m_sb.sb_pquotino != NULLFSINO) { |
| 1523 | ASSERT(mp->m_sb.sb_pquotino > 0); |
| 1524 | error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino, |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1525 | 0, 0, &pip); |
| 1526 | if (error) |
| 1527 | goto error_rele; |
| 1528 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1529 | } else { |
| 1530 | flags |= XFS_QMOPT_SBVERSION; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1531 | } |
| 1532 | |
| 1533 | /* |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1534 | * Create the three inodes, if they don't exist already. The changes |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1535 | * made above will get added to a transaction and logged in one of |
| 1536 | * the qino_alloc calls below. If the device is readonly, |
| 1537 | * temporarily switch to read-write to do this. |
| 1538 | */ |
| 1539 | if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) { |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1540 | error = xfs_qm_qino_alloc(mp, &uip, |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1541 | flags | XFS_QMOPT_UQUOTA); |
| 1542 | if (error) |
| 1543 | goto error_rele; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1544 | |
| 1545 | flags &= ~XFS_QMOPT_SBVERSION; |
| 1546 | } |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1547 | if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) { |
Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 1548 | error = xfs_qm_qino_alloc(mp, &gip, |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1549 | flags | XFS_QMOPT_GQUOTA); |
| 1550 | if (error) |
| 1551 | goto error_rele; |
| 1552 | |
| 1553 | flags &= ~XFS_QMOPT_SBVERSION; |
| 1554 | } |
| 1555 | if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) { |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1556 | error = xfs_qm_qino_alloc(mp, &pip, |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1557 | flags | XFS_QMOPT_PQUOTA); |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1558 | if (error) |
| 1559 | goto error_rele; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1560 | } |
| 1561 | |
Christoph Hellwig | 8a7b8a8 | 2010-04-20 17:01:30 +1000 | [diff] [blame] | 1562 | mp->m_quotainfo->qi_uquotaip = uip; |
| 1563 | mp->m_quotainfo->qi_gquotaip = gip; |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1564 | mp->m_quotainfo->qi_pquotaip = pip; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1565 | |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1566 | return 0; |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1567 | |
| 1568 | error_rele: |
| 1569 | if (uip) |
Darrick J. Wong | 44a8736 | 2018-07-25 12:52:32 -0700 | [diff] [blame] | 1570 | xfs_irele(uip); |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1571 | if (gip) |
Darrick J. Wong | 44a8736 | 2018-07-25 12:52:32 -0700 | [diff] [blame] | 1572 | xfs_irele(gip); |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1573 | if (pip) |
Darrick J. Wong | 44a8736 | 2018-07-25 12:52:32 -0700 | [diff] [blame] | 1574 | xfs_irele(pip); |
Eric Sandeen | b474c7a | 2014-06-22 15:04:54 +1000 | [diff] [blame] | 1575 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1576 | } |
| 1577 | |
Christoph Hellwig | 92b2e5b | 2012-02-01 13:57:20 +0000 | [diff] [blame] | 1578 | STATIC void |
Aliaksei Karaliou | 3a3882f | 2017-12-21 13:18:26 -0800 | [diff] [blame] | 1579 | xfs_qm_destroy_quotainos( |
Pavel Reichl | c072fbe | 2019-11-12 17:04:26 -0800 | [diff] [blame] | 1580 | struct xfs_quotainfo *qi) |
Aliaksei Karaliou | 3a3882f | 2017-12-21 13:18:26 -0800 | [diff] [blame] | 1581 | { |
| 1582 | if (qi->qi_uquotaip) { |
Darrick J. Wong | 44a8736 | 2018-07-25 12:52:32 -0700 | [diff] [blame] | 1583 | xfs_irele(qi->qi_uquotaip); |
Aliaksei Karaliou | 3a3882f | 2017-12-21 13:18:26 -0800 | [diff] [blame] | 1584 | qi->qi_uquotaip = NULL; /* paranoia */ |
| 1585 | } |
| 1586 | if (qi->qi_gquotaip) { |
Darrick J. Wong | 44a8736 | 2018-07-25 12:52:32 -0700 | [diff] [blame] | 1587 | xfs_irele(qi->qi_gquotaip); |
Aliaksei Karaliou | 3a3882f | 2017-12-21 13:18:26 -0800 | [diff] [blame] | 1588 | qi->qi_gquotaip = NULL; |
| 1589 | } |
| 1590 | if (qi->qi_pquotaip) { |
Darrick J. Wong | 44a8736 | 2018-07-25 12:52:32 -0700 | [diff] [blame] | 1591 | xfs_irele(qi->qi_pquotaip); |
Aliaksei Karaliou | 3a3882f | 2017-12-21 13:18:26 -0800 | [diff] [blame] | 1592 | qi->qi_pquotaip = NULL; |
| 1593 | } |
| 1594 | } |
| 1595 | |
| 1596 | STATIC void |
Christoph Hellwig | 92b2e5b | 2012-02-01 13:57:20 +0000 | [diff] [blame] | 1597 | xfs_qm_dqfree_one( |
| 1598 | struct xfs_dquot *dqp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1599 | { |
Christoph Hellwig | 92b2e5b | 2012-02-01 13:57:20 +0000 | [diff] [blame] | 1600 | struct xfs_mount *mp = dqp->q_mount; |
| 1601 | struct xfs_quotainfo *qi = mp->m_quotainfo; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1602 | |
Christoph Hellwig | 9f920f1 | 2012-03-13 08:52:35 +0000 | [diff] [blame] | 1603 | mutex_lock(&qi->qi_tree_lock); |
Chandra Seetharaman | 329e087 | 2013-06-27 17:25:05 -0500 | [diff] [blame] | 1604 | radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags), |
Christoph Hellwig | 9f920f1 | 2012-03-13 08:52:35 +0000 | [diff] [blame] | 1605 | be32_to_cpu(dqp->q_core.d_id)); |
Christoph Hellwig | bf72de3 | 2011-12-06 21:58:19 +0000 | [diff] [blame] | 1606 | |
Christoph Hellwig | 92b2e5b | 2012-02-01 13:57:20 +0000 | [diff] [blame] | 1607 | qi->qi_dquots--; |
Christoph Hellwig | b84a3a9 | 2012-03-14 11:53:34 -0500 | [diff] [blame] | 1608 | mutex_unlock(&qi->qi_tree_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1609 | |
Christoph Hellwig | 92b2e5b | 2012-02-01 13:57:20 +0000 | [diff] [blame] | 1610 | xfs_qm_dqdestroy(dqp); |
| 1611 | } |
Christoph Hellwig | be7ffc3 | 2011-12-06 21:58:17 +0000 | [diff] [blame] | 1612 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1613 | /* --------------- utility functions for vnodeops ---------------- */ |
| 1614 | |
| 1615 | |
| 1616 | /* |
Christoph Hellwig | 6c77b0e | 2010-10-06 18:41:17 +0000 | [diff] [blame] | 1617 | * Given an inode and a uid, gid, and prid, make sure that we have |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1618 | * allocated the relevant dquot(s) on disk, and that we won't exceed inode |
| 1619 | * quotas by creating this file. |
| 1620 | * This also attaches dquot(s) to the given inode after locking it, |
| 1621 | * and returns the dquots corresponding to the uid, gid, and/or prid. |
| 1622 | * |
| 1623 | * in : inode (unlocked) |
| 1624 | * out : udquot, gdquot, pdquot with references taken and unlocked |
| 1625 | */ |
| 1626 | int |
| 1627 | xfs_qm_vop_dqalloc( |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1628 | struct xfs_inode *ip, |
Christoph Hellwig | 5429515 | 2020-02-21 08:31:27 -0800 | [diff] [blame] | 1629 | kuid_t uid, |
| 1630 | kgid_t gid, |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1631 | prid_t prid, |
| 1632 | uint flags, |
| 1633 | struct xfs_dquot **O_udqpp, |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1634 | struct xfs_dquot **O_gdqpp, |
| 1635 | struct xfs_dquot **O_pdqpp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1636 | { |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1637 | struct xfs_mount *mp = ip->i_mount; |
Christoph Hellwig | 5429515 | 2020-02-21 08:31:27 -0800 | [diff] [blame] | 1638 | struct inode *inode = VFS_I(ip); |
Christoph Hellwig | ba8adad | 2020-02-21 08:31:27 -0800 | [diff] [blame] | 1639 | struct user_namespace *user_ns = inode->i_sb->s_user_ns; |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1640 | struct xfs_dquot *uq = NULL; |
| 1641 | struct xfs_dquot *gq = NULL; |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1642 | struct xfs_dquot *pq = NULL; |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1643 | int error; |
| 1644 | uint lockflags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1645 | |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1646 | if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1647 | return 0; |
| 1648 | |
| 1649 | lockflags = XFS_ILOCK_EXCL; |
| 1650 | xfs_ilock(ip, lockflags); |
| 1651 | |
Christoph Hellwig | bd186aa | 2007-08-30 17:21:12 +1000 | [diff] [blame] | 1652 | if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip)) |
Christoph Hellwig | 5429515 | 2020-02-21 08:31:27 -0800 | [diff] [blame] | 1653 | gid = inode->i_gid; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1654 | |
| 1655 | /* |
| 1656 | * Attach the dquot(s) to this inode, doing a dquot allocation |
| 1657 | * if necessary. The dquot(s) will not be locked. |
| 1658 | */ |
| 1659 | if (XFS_NOT_DQATTACHED(mp, ip)) { |
Darrick J. Wong | 4882c19 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 1660 | error = xfs_qm_dqattach_locked(ip, true); |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1661 | if (error) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1662 | xfs_iunlock(ip, lockflags); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1663 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1664 | } |
| 1665 | } |
| 1666 | |
Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 1667 | if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) { |
Christoph Hellwig | 5429515 | 2020-02-21 08:31:27 -0800 | [diff] [blame] | 1668 | if (!uid_eq(inode->i_uid, uid)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1669 | /* |
| 1670 | * What we need is the dquot that has this uid, and |
| 1671 | * if we send the inode to dqget, the uid of the inode |
| 1672 | * takes priority over what's sent in the uid argument. |
| 1673 | * We must unlock inode here before calling dqget if |
| 1674 | * we're not sending the inode, because otherwise |
| 1675 | * we'll deadlock by doing trans_reserve while |
| 1676 | * holding ilock. |
| 1677 | */ |
| 1678 | xfs_iunlock(ip, lockflags); |
Christoph Hellwig | ba8adad | 2020-02-21 08:31:27 -0800 | [diff] [blame] | 1679 | error = xfs_qm_dqget(mp, from_kuid(user_ns, uid), |
Christoph Hellwig | 5429515 | 2020-02-21 08:31:27 -0800 | [diff] [blame] | 1680 | XFS_DQ_USER, true, &uq); |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1681 | if (error) { |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1682 | ASSERT(error != -ENOENT); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1683 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1684 | } |
| 1685 | /* |
| 1686 | * Get the ilock in the right order. |
| 1687 | */ |
| 1688 | xfs_dqunlock(uq); |
| 1689 | lockflags = XFS_ILOCK_SHARED; |
| 1690 | xfs_ilock(ip, lockflags); |
| 1691 | } else { |
| 1692 | /* |
| 1693 | * Take an extra reference, because we'll return |
| 1694 | * this to the caller. |
| 1695 | */ |
| 1696 | ASSERT(ip->i_udquot); |
Christoph Hellwig | 78e5589 | 2011-12-06 21:58:22 +0000 | [diff] [blame] | 1697 | uq = xfs_qm_dqhold(ip->i_udquot); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1698 | } |
| 1699 | } |
Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 1700 | if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) { |
Christoph Hellwig | 5429515 | 2020-02-21 08:31:27 -0800 | [diff] [blame] | 1701 | if (!gid_eq(inode->i_gid, gid)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1702 | xfs_iunlock(ip, lockflags); |
Christoph Hellwig | ba8adad | 2020-02-21 08:31:27 -0800 | [diff] [blame] | 1703 | error = xfs_qm_dqget(mp, from_kgid(user_ns, gid), |
Christoph Hellwig | 5429515 | 2020-02-21 08:31:27 -0800 | [diff] [blame] | 1704 | XFS_DQ_GROUP, true, &gq); |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1705 | if (error) { |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1706 | ASSERT(error != -ENOENT); |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1707 | goto error_rele; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1708 | } |
| 1709 | xfs_dqunlock(gq); |
| 1710 | lockflags = XFS_ILOCK_SHARED; |
| 1711 | xfs_ilock(ip, lockflags); |
| 1712 | } else { |
| 1713 | ASSERT(ip->i_gdquot); |
Christoph Hellwig | 78e5589 | 2011-12-06 21:58:22 +0000 | [diff] [blame] | 1714 | gq = xfs_qm_dqhold(ip->i_gdquot); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1715 | } |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1716 | } |
| 1717 | if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) { |
Christoph Hellwig | de7a866 | 2019-11-12 08:22:54 -0800 | [diff] [blame] | 1718 | if (ip->i_d.di_projid != prid) { |
Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 1719 | xfs_iunlock(ip, lockflags); |
Darrick J. Wong | 4882c19 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 1720 | error = xfs_qm_dqget(mp, (xfs_dqid_t)prid, XFS_DQ_PROJ, |
Darrick J. Wong | 30ab2dc | 2018-05-04 15:30:24 -0700 | [diff] [blame] | 1721 | true, &pq); |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1722 | if (error) { |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1723 | ASSERT(error != -ENOENT); |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1724 | goto error_rele; |
Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 1725 | } |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1726 | xfs_dqunlock(pq); |
Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 1727 | lockflags = XFS_ILOCK_SHARED; |
| 1728 | xfs_ilock(ip, lockflags); |
| 1729 | } else { |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1730 | ASSERT(ip->i_pdquot); |
| 1731 | pq = xfs_qm_dqhold(ip->i_pdquot); |
Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 1732 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1733 | } |
Kaixu Xia | c140735 | 2020-04-22 21:54:27 -0700 | [diff] [blame] | 1734 | trace_xfs_dquot_dqalloc(ip); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1735 | |
| 1736 | xfs_iunlock(ip, lockflags); |
| 1737 | if (O_udqpp) |
| 1738 | *O_udqpp = uq; |
Markus Elfring | d2a5e3c | 2014-12-01 08:24:20 +1100 | [diff] [blame] | 1739 | else |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1740 | xfs_qm_dqrele(uq); |
| 1741 | if (O_gdqpp) |
| 1742 | *O_gdqpp = gq; |
Markus Elfring | d2a5e3c | 2014-12-01 08:24:20 +1100 | [diff] [blame] | 1743 | else |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1744 | xfs_qm_dqrele(gq); |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1745 | if (O_pdqpp) |
| 1746 | *O_pdqpp = pq; |
Markus Elfring | d2a5e3c | 2014-12-01 08:24:20 +1100 | [diff] [blame] | 1747 | else |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1748 | xfs_qm_dqrele(pq); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1749 | return 0; |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1750 | |
| 1751 | error_rele: |
Markus Elfring | d2a5e3c | 2014-12-01 08:24:20 +1100 | [diff] [blame] | 1752 | xfs_qm_dqrele(gq); |
| 1753 | xfs_qm_dqrele(uq); |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1754 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1755 | } |
| 1756 | |
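/*
 * Illustrative sketch only, not part of this file: a typical create-path
 * caller of xfs_qm_vop_dqalloc() allocates the dquots before starting its
 * transaction, charges the quota reservation, attaches the dquots to the
 * new inode, and then drops its own references. Variable names (dp, tp,
 * ip, prid, resblks) are assumed from the usual caller context, and the
 * exact reservation helpers vary between kernel versions.
 */
#if 0
	struct xfs_dquot	*udqp = NULL, *gdqp = NULL, *pdqp = NULL;
	int			error;

	/* Ensure the dquots exist on disk and take references to them. */
	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	/* ... allocate tp, then charge blocks and one inode against them ... */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, pdqp,
			resblks, 1, 0);

	/* ... create the inode, then hand the held dquots to it ... */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	/* The inode holds its own references now; drop ours. */
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);
#endif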
| 1757 | /* |
| 1758 | * Actually transfer ownership, and do dquot modifications. |
| 1759 | * These were already reserved. |
| 1760 | */ |
Pavel Reichl | aefe69a | 2019-11-12 17:04:02 -0800 | [diff] [blame] | 1761 | struct xfs_dquot * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1762 | xfs_qm_vop_chown( |
Pavel Reichl | aefe69a | 2019-11-12 17:04:02 -0800 | [diff] [blame] | 1763 | struct xfs_trans *tp, |
| 1764 | struct xfs_inode *ip, |
| 1765 | struct xfs_dquot **IO_olddq, |
| 1766 | struct xfs_dquot *newdq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1767 | { |
Pavel Reichl | aefe69a | 2019-11-12 17:04:02 -0800 | [diff] [blame] | 1768 | struct xfs_dquot *prevdq; |
Nathan Scott | 06d10dd | 2005-06-21 15:48:47 +1000 | [diff] [blame] | 1769 | uint bfield = XFS_IS_REALTIME_INODE(ip) ? |
| 1770 | XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT; |
| 1771 | |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1772 | |
Christoph Hellwig | 579aa9c | 2008-04-22 17:34:00 +1000 | [diff] [blame] | 1773 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1774 | ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); |
| 1775 | |
| 1776 | /* old dquot */ |
| 1777 | prevdq = *IO_olddq; |
| 1778 | ASSERT(prevdq); |
| 1779 | ASSERT(prevdq != newdq); |
| 1780 | |
Nathan Scott | 06d10dd | 2005-06-21 15:48:47 +1000 | [diff] [blame] | 1781 | xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks)); |
| 1782 | xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1783 | |
| 1784 | /* the sparkling new dquot */ |
Nathan Scott | 06d10dd | 2005-06-21 15:48:47 +1000 | [diff] [blame] | 1785 | xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks); |
| 1786 | xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1787 | |
| 1788 | /* |
Christoph Hellwig | 78e5589 | 2011-12-06 21:58:22 +0000 | [diff] [blame] | 1789 | * Take an extra reference, because the inode is going to keep |
| 1790 | * this dquot pointer even after the trans_commit. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1791 | */ |
Christoph Hellwig | 78e5589 | 2011-12-06 21:58:22 +0000 | [diff] [blame] | 1792 | *IO_olddq = xfs_qm_dqhold(newdq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1793 | |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1794 | return prevdq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1795 | } |
| 1796 | |
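/*
 * Illustrative sketch only, not part of this file: the setattr chown path
 * swaps the inode's dquot pointer via xfs_qm_vop_chown() and releases the
 * displaced dquot after the transaction commits. Names (tp, ip, mp, uid,
 * udqp) are assumed from the usual caller context.
 */
#if 0
	struct xfs_dquot	*olddquot = NULL;

	if (XFS_IS_UQUOTA_ON(mp) && !uid_eq(VFS_I(ip)->i_uid, uid)) {
		/* Moves the block/inode counts; returns the previous dquot. */
		olddquot = xfs_qm_vop_chown(tp, ip, &ip->i_udquot, udqp);
	}
	error = xfs_trans_commit(tp);

	/* Release the reference that xfs_qm_vop_chown() handed back. */
	xfs_qm_dqrele(olddquot);
#endif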
| 1797 | /* |
Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 1798 | * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1799 | */ |
| 1800 | int |
| 1801 | xfs_qm_vop_chown_reserve( |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1802 | struct xfs_trans *tp, |
| 1803 | struct xfs_inode *ip, |
| 1804 | struct xfs_dquot *udqp, |
| 1805 | struct xfs_dquot *gdqp, |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1806 | struct xfs_dquot *pdqp, |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1807 | uint flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1808 | { |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1809 | struct xfs_mount *mp = ip->i_mount; |
Darrick J. Wong | 394aafd | 2019-04-17 16:30:24 -0700 | [diff] [blame] | 1810 | uint64_t delblks; |
Eric Sandeen | dcf1ccc | 2020-05-21 13:06:59 -0700 | [diff] [blame] | 1811 | unsigned int blkflags; |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1812 | struct xfs_dquot *udq_unres = NULL; |
| 1813 | struct xfs_dquot *gdq_unres = NULL; |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1814 | struct xfs_dquot *pdq_unres = NULL; |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1815 | struct xfs_dquot *udq_delblks = NULL; |
| 1816 | struct xfs_dquot *gdq_delblks = NULL; |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1817 | struct xfs_dquot *pdq_delblks = NULL; |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1818 | int error; |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1819 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1820 | |
Christoph Hellwig | 579aa9c | 2008-04-22 17:34:00 +1000 | [diff] [blame] | 1821 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1822 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
| 1823 | |
| 1824 | delblks = ip->i_delayed_blks; |
Nathan Scott | 06d10dd | 2005-06-21 15:48:47 +1000 | [diff] [blame] | 1825 | blkflags = XFS_IS_REALTIME_INODE(ip) ? |
| 1826 | XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1827 | |
| 1828 | if (XFS_IS_UQUOTA_ON(mp) && udqp && |
Christoph Hellwig | ba8adad | 2020-02-21 08:31:27 -0800 | [diff] [blame] | 1829 | i_uid_read(VFS_I(ip)) != be32_to_cpu(udqp->q_core.d_id)) { |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1830 | udq_delblks = udqp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1831 | /* |
| 1832 | * If there are delayed allocation blocks, then we have to |
| 1833 | * unreserve those from the old dquot, and add them to the |
| 1834 | * new dquot. |
| 1835 | */ |
| 1836 | if (delblks) { |
| 1837 | ASSERT(ip->i_udquot); |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1838 | udq_unres = ip->i_udquot; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1839 | } |
| 1840 | } |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1841 | if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp && |
Christoph Hellwig | ba8adad | 2020-02-21 08:31:27 -0800 | [diff] [blame] | 1842 | i_gid_read(VFS_I(ip)) != be32_to_cpu(gdqp->q_core.d_id)) { |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1843 | gdq_delblks = gdqp; |
| 1844 | if (delblks) { |
| 1845 | ASSERT(ip->i_gdquot); |
| 1846 | gdq_unres = ip->i_gdquot; |
| 1847 | } |
| 1848 | } |
Nathan Scott | 9a2a7de | 2006-03-31 13:04:49 +1000 | [diff] [blame] | 1849 | |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1850 | if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp && |
Christoph Hellwig | de7a866 | 2019-11-12 08:22:54 -0800 | [diff] [blame] | 1851 | ip->i_d.di_projid != be32_to_cpu(pdqp->q_core.d_id)) { |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1852 | pdq_delblks = pdqp; |
| 1853 | if (delblks) { |
| 1854 | ASSERT(ip->i_pdquot); |
| 1855 | pdq_unres = ip->i_pdquot; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1856 | } |
| 1857 | } |
| 1858 | |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1859 | error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1860 | udq_delblks, gdq_delblks, pdq_delblks, |
Eric Sandeen | dcf1ccc | 2020-05-21 13:06:59 -0700 | [diff] [blame] | 1861 | ip->i_d.di_nblocks, 1, flags | blkflags); |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1862 | if (error) |
| 1863 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1864 | |
| 1865 | /* |
| 1866 | * Do the delayed blks reservations/unreservations now. Since, these |
| 1867 | * are done without the help of a transaction, if a reservation fails |
| 1868 | * its previous reservations won't be automatically undone by trans |
| 1869 | * code. So, we have to do it manually here. |
| 1870 | */ |
| 1871 | if (delblks) { |
| 1872 | /* |
| 1873 | * Do the reservations first. Unreservation can't fail. |
| 1874 | */ |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1875 | ASSERT(udq_delblks || gdq_delblks || pdq_delblks); |
| 1876 | ASSERT(udq_unres || gdq_unres || pdq_unres); |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1877 | error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1878 | udq_delblks, gdq_delblks, pdq_delblks, |
Eric Sandeen | dcf1ccc | 2020-05-21 13:06:59 -0700 | [diff] [blame] | 1879 | (xfs_qcnt_t)delblks, 0, flags | blkflags); |
Chandra Seetharaman | 113a568 | 2013-06-27 17:25:07 -0500 | [diff] [blame] | 1880 | if (error) |
| 1881 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1882 | xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 1883 | udq_unres, gdq_unres, pdq_unres, |
| 1884 | -((xfs_qcnt_t)delblks), 0, blkflags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1885 | } |
| 1886 | |
Eric Sandeen | d99831f | 2014-06-22 15:03:54 +1000 | [diff] [blame] | 1887 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1888 | } |
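
/*
 * Illustrative sketch, not part of this file: a setattr-style caller is
 * expected to reserve against the prospective dquots before switching the
 * inode's ownership, forcing the reservation when it is privileged. The
 * cleanup label is hypothetical.
 */
#if 0	/* example only */
	error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, NULL,
			capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0);
	if (error)		/* out of quota */
		goto out_cancel;
#endif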

int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip);
				if (error)
					return error;
			}
		}
	}
	return 0;
}
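
/*
 * Illustrative sketch, not part of this file: a rename caller gathers the
 * source/target directories and inodes into a table and attaches dquots to
 * all of them up front. Note the loop above only skips *adjacent*
 * duplicates, so the caller is expected to sort the table first; the array
 * layout shown here is an assumption for the example.
 */
#if 0	/* example only */
	struct xfs_inode	*inodes[5] = {
		src_dp, src_ip, target_dp, target_ip, NULL,
	};

	error = xfs_qm_vop_rename_dqattach(inodes);
	if (error)
		return error;
#endif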

void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(i_uid_read(VFS_I(ip)) == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(i_gid_read(VFS_I(ip)) == be32_to_cpu(gdqp->q_core.d_id));

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(ip->i_d.di_projid == be32_to_cpu(pdqp->q_core.d_id));

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}
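
/*
 * Illustrative sketch, not part of this file: a create-side caller takes
 * dquot references before starting the transaction and joins them to the
 * freshly allocated inode afterwards, roughly as below. Error handling and
 * the inode allocation itself are elided.
 */
#if 0	/* example only */
	struct xfs_dquot	*udqp = NULL, *gdqp = NULL, *pdqp = NULL;

	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	/* ... allocate the new inode 'ip' inside transaction 'tp' ... */

	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
#endif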