// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *         qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project;
 * otherwise lock the dquot with the lowest id first. See xfs_dqlock2.
 */

struct kmem_zone		*xfs_qm_dqtrxzone;
static struct kmem_zone		*xfs_qm_dqzone;

static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	struct xfs_dquot	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
	mutex_destroy(&dqp->q_qlock);

	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
	kmem_cache_free(xfs_qm_dqzone, dqp);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dq)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*d = &dq->q_core;
	struct xfs_def_quota	*defq;
	int			prealloc = 0;

	ASSERT(d->d_id);
	defq = xfs_get_defquota(dq, q);

	if (defq->bsoftlimit && !d->d_blk_softlimit) {
		d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
		prealloc = 1;
	}
	if (defq->bhardlimit && !d->d_blk_hardlimit) {
		d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
		prealloc = 1;
	}
	if (defq->isoftlimit && !d->d_ino_softlimit)
		d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
	if (defq->ihardlimit && !d->d_ino_hardlimit)
		d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
	if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
		d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
	if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
		d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);

	if (prealloc)
		xfs_dquot_set_prealloc_limits(dq);
}
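
/*
 * Worked example (hypothetical numbers): if the default block soft limit
 * is 100 blocks and a non-root dquot still has d_blk_softlimit == 0, the
 * function above copies the default in and, because a block limit
 * changed, recomputes the speculative preallocation watermarks via
 * xfs_dquot_set_prealloc_limits().
 */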

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated. (We just don't reject any quota
 * reservations in that case, when enforcement is off.)
 * We also return 0 as the values of the timers in Q_GETQUOTA calls
 * when enforcement is off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded. They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
	struct xfs_mount	*mp,
	struct xfs_disk_dquot	*d)
{
	ASSERT(d->d_id);

#ifdef DEBUG
	if (d->d_blk_hardlimit)
		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
		       be64_to_cpu(d->d_blk_hardlimit));
	if (d->d_ino_hardlimit)
		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
		       be64_to_cpu(d->d_ino_hardlimit));
	if (d->d_rtb_hardlimit)
		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
		       be64_to_cpu(d->d_rtb_hardlimit));
#endif

	if (!d->d_btimer) {
		if ((d->d_blk_softlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_softlimit))) ||
		    (d->d_blk_hardlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = cpu_to_be32(ktime_get_real_seconds() +
					mp->m_quotainfo->qi_btimelimit);
		} else {
			d->d_bwarns = 0;
		}
	} else {
		if ((!d->d_blk_softlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_softlimit))) &&
		    (!d->d_blk_hardlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = 0;
		}
	}

	if (!d->d_itimer) {
		if ((d->d_ino_softlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_softlimit))) ||
		    (d->d_ino_hardlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = cpu_to_be32(ktime_get_real_seconds() +
					mp->m_quotainfo->qi_itimelimit);
		} else {
			d->d_iwarns = 0;
		}
	} else {
		if ((!d->d_ino_softlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_softlimit))) &&
		    (!d->d_ino_hardlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = 0;
		}
	}

	if (!d->d_rtbtimer) {
		if ((d->d_rtb_softlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_softlimit))) ||
		    (d->d_rtb_hardlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = cpu_to_be32(ktime_get_real_seconds() +
					mp->m_quotainfo->qi_rtbtimelimit);
		} else {
			d->d_rtbwarns = 0;
		}
	} else {
		if ((!d->d_rtb_softlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_softlimit))) &&
		    (!d->d_rtb_hardlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = 0;
		}
	}
}
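
/*
 * Timeline sketch (hypothetical numbers): a dquot with a 100-block soft
 * limit grows d_bcount to 101; the next call above finds d_btimer == 0
 * and arms it to "now + qi_btimelimit", starting the grace period. If
 * usage later drops back to 100 or below, the timer is cleared again.
 */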

/*
 * initialize a buffer full of dquots and log the whole thing
 */
STATIC void
xfs_qm_init_dquot_blk(
	xfs_trans_t		*tp,
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	uint			type,
	xfs_buf_t		*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_dqblk_t		*d;
	xfs_dqid_t		curid;
	int			i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - id's are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_flags = type;
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp,
			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
			     XFS_BLF_GDQUOT_BUF)));
	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
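
/*
 * Worked example (hypothetical numbers): if qi_dqperchunk were 30 and
 * id were 100, then curid = 100 - (100 % 30) = 90, so the chunk above
 * is initialized with dquot ids 90..119 and every on-disk dquot in it
 * gets a valid header, even the ones nobody has used yet.
 */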

/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft limit
 * is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	uint64_t space;

	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
	if (!dqp->q_prealloc_lo_wmark) {
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);
		dqp->q_prealloc_lo_wmark *= 95;
	}

	space = dqp->q_prealloc_hi_wmark;

	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
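
/*
 * Sketch of the arithmetic above with a hypothetical 1000-block hard
 * limit and no soft limit: hi_wmark = 1000, lo_wmark = 1000/100 * 95 =
 * 950, and q_low_space becomes {10, 30, 50} for the 1%/3%/5% thresholds
 * used to throttle speculative preallocation as the limit is approached.
 */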

/*
 * Ensure that the given in-core dquot has a buffer on disk backing it, and
 * return the buffer locked and held. This is called when the bmapi finds a
 * hole.
 */
STATIC int
xfs_dquot_disk_alloc(
	struct xfs_trans	**tpp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_trans	*tp = *tpp;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_buf		*bp;
	struct xfs_inode	*quotip = xfs_quota_inode(mp, dqp->dq_flags);
	int			nmaps = 1;
	int			error;

	trace_xfs_dqalloc(dqp);

	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		/*
		 * Return if this type of quota was turned off while we
		 * didn't hold the inode lock.
		 */
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return -ESRCH;
	}

	/* Create the block mapping. */
	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
			&nmaps);
	if (error)
		return error;
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp);
	if (error)
		return error;
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * Hold the buffer and join it to the dfops so that we'll still own
	 * the buffer when we return to the caller. The buffer disposal on
	 * error must be paid attention to very carefully, as it has been
	 * broken since commit efa092f3d4c6 "[XFS] Fixes a bug in the quota
	 * code when allocating a new dquot record" in 2005, and the later
	 * conversion to xfs_defer_ops in commit 310a75a3c6c747 failed to keep
	 * the buffer locked across the _defer_finish call. We can now do
	 * this correctly with xfs_defer_bjoin.
	 *
	 * Above, we allocated a disk block for the dquot information and used
	 * get_buf to initialize the dquot. If the _defer_finish fails, the old
	 * transaction is gone but the new buffer is not joined or held to any
	 * transaction, so we must _buf_relse it.
	 *
	 * If everything succeeds, the caller of this function is returned a
	 * buffer that is locked and held to the transaction. The caller
	 * is responsible for unlocking any buffer passed back, either
	 * manually or by committing the transaction. On error, the buffer is
	 * released and not passed back.
	 */
	xfs_trans_bhold(tp, bp);
	error = xfs_defer_finish(tpp);
	if (error) {
		xfs_trans_bhold_release(*tpp, bp);
		xfs_trans_brelse(*tpp, bp);
		return error;
	}
	*bpp = bp;
	return 0;
}

/*
 * Read in the in-core dquot's on-disk metadata and return the buffer.
 * Returns ENOENT to signal a hole.
 */
STATIC int
xfs_dquot_disk_read(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_buf		*bp;
	struct xfs_inode	*quotip = xfs_quota_inode(mp, dqp->dq_flags);
	uint			lock_mode;
	int			nmaps = 1;
	int			error;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(mp, dqp->dq_flags)) {
		/*
		 * Return if this type of quota was turned off while we
		 * didn't hold the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet.
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount >= 1);
	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK)
		return -ENOENT;

	trace_xfs_dqtobp_read(dqp);

	/*
	 * Store the blkno etc. so that we don't have to do the
	 * mapping all the time.
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				   &xfs_dquot_buf_ops);
	if (error) {
		ASSERT(bp == NULL);
		return error;
	}

	ASSERT(xfs_buf_islocked(bp));
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
	*bpp = bp;

	return 0;
}
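
/*
 * Note: -ENOENT from xfs_dquot_disk_read() means the dquot's chunk has
 * never been allocated on disk; xfs_qm_dqread() below uses that to fall
 * back to xfs_qm_dqread_alloc() when the caller allows allocation.
 */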

/* Allocate and initialize everything we need for an incore dquot. */
STATIC struct xfs_dquot *
xfs_dquot_alloc(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type)
{
	struct xfs_dquot	*dqp;

	dqp = kmem_zone_zalloc(xfs_qm_dqzone, 0);

	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);
	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
			sizeof(xfs_dqblk_t);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQ_USER:
		/* uses the default lock class */
		break;
	case XFS_DQ_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQ_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	xfs_qm_dquot_logitem_init(dqp);

	XFS_STATS_INC(mp, xs_qm_dquot);
	return dqp;
}

/* Copy the in-core quota fields in from the on-disk buffer. */
STATIC void
xfs_dquot_from_disk(
	struct xfs_dquot	*dqp,
	struct xfs_buf		*bp)
{
	struct xfs_disk_dquot	*ddqp = bp->b_addr + dqp->q_bufoffset;

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(struct xfs_disk_dquot));

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);
}
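
/*
 * Illustration (hypothetical numbers): if the on-disk dquot records
 * d_bcount == 10, q_res_bcount also starts at 10; a later reservation of
 * 5 blocks raises it to 15, so enforcement can compare one counter
 * against the limits instead of adding usage and reservation each time.
 */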

/* Allocate and initialize the dquot buffer for this in-core dquot. */
static int
xfs_qm_dqread_alloc(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
			XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
	if (error)
		goto err;

	error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
	if (error)
		goto err_cancel;

	error = xfs_trans_commit(tp);
	if (error) {
		/*
		 * Buffer was held to the transaction, so we have to unlock it
		 * manually here because we're not passing it back.
		 */
		xfs_buf_relse(*bpp);
		*bpp = NULL;
		goto err;
	}
	return 0;

err_cancel:
	xfs_trans_cancel(tp);
err:
	return error;
}
/*
 * Read in the on-disk dquot via xfs_dquot_disk_read(), copy it to an
 * incore version, and release the buffer immediately. If @can_alloc is
 * true, fill any holes in the on-disk metadata.
 */
static int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	bool			can_alloc,
	struct xfs_dquot	**dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_buf		*bp;
	int			error;

	dqp = xfs_dquot_alloc(mp, id, type);
	trace_xfs_dqread(dqp);

	/* Try to read the buffer, allocating if necessary. */
	error = xfs_dquot_disk_read(mp, dqp, &bp);
	if (error == -ENOENT && can_alloc)
		error = xfs_qm_dqread_alloc(mp, dqp, &bp);
	if (error)
		goto err;

	/*
	 * At this point we should have a clean locked buffer. Copy the data
	 * to the incore dquot and release the buffer since the incore dquot
	 * has its own locking protocol so we needn't tie up the buffer any
	 * further.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_dquot_from_disk(dqp, bp);

	xfs_buf_relse(bp);
	*dqpp = dqp;
	return error;

err:
	trace_xfs_dqread_fail(dqp);
	xfs_qm_dqdestroy(dqp);
	*dqpp = NULL;
	return error;
}

/*
 * Advance to the next id in the current chunk, or if at the end of the
 * chunk, skip ahead to the first id in the next allocated chunk by
 * looking up the quota inode's extent map.
 */
static int
xfs_dq_get_next_id(
	struct xfs_mount	*mp,
	uint			type,
	xfs_dqid_t		*id)
{
	struct xfs_inode	*quotip = xfs_quota_inode(mp, type);
	xfs_dqid_t		next_id = *id + 1; /* simple advance */
	uint			lock_flags;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	cur;
	xfs_fsblock_t		start;
	int			error = 0;

	/* If we'd wrap past the max ID, stop */
	if (next_id < *id)
		return -ENOENT;

	/* If new ID is within the current chunk, advancing it sufficed */
	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
		*id = next_id;
		return 0;
	}

	/* Nope, next_id is now past the current chunk, so find the next one */
	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;

	lock_flags = xfs_ilock_data_map_shared(quotip);
	if (!(quotip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
		if (error)
			return error;
	}

	if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
		/* contiguous chunk, bump startoff for the id calculation */
		if (got.br_startoff < start)
			got.br_startoff = start;
		*id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
	} else {
		error = -ENOENT;
	}

	xfs_iunlock(quotip, lock_flags);

	return error;
}
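
/*
 * Example walk (hypothetical numbers): with qi_dqperchunk == 30 and
 * *id == 28, next_id == 29 is still inside the chunk (29 % 30 != 0), so
 * it is returned directly. With *id == 29, next_id == 30 starts a new
 * chunk, so the extent lookup above finds the next allocated chunk and
 * *id becomes that chunk's first dquot id, or -ENOENT if none exists.
 */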

/*
 * Look up the dquot in the in-core cache. If found, the dquot is returned
 * locked and ready to go.
 */
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id)
{
	struct xfs_dquot	*dqp;

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (!dqp) {
		mutex_unlock(&qi->qi_tree_lock);
		XFS_STATS_INC(mp, xs_qm_dqcachemisses);
		return NULL;
	}

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING) {
		xfs_dqunlock(dqp);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_freeing(dqp);
		delay(1);
		goto restart;
	}

	dqp->q_nrefs++;
	mutex_unlock(&qi->qi_tree_lock);

	trace_xfs_dqget_hit(dqp);
	XFS_STATS_INC(mp, xs_qm_dqcachehits);
	return dqp;
}

/*
 * Try to insert a new dquot into the in-core cache. If an error occurs the
 * caller should throw away the dquot and start over. Otherwise, the dquot
 * is returned locked (and held by the cache) as if there had been a cache
 * hit.
 */
static int
xfs_qm_dqget_cache_insert(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id,
	struct xfs_dquot	*dqp)
{
	int			error;

	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		/* Duplicate found! Caller must try again. */
		WARN_ON(error != -EEXIST);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		return error;
	}

	/* Return a locked dquot to the caller, with a reference taken. */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

	return 0;
}

/* Check our input parameters. */
static int
xfs_qm_dqget_checks(
	struct xfs_mount	*mp,
	uint			type)
{
	if (WARN_ON_ONCE(!XFS_IS_QUOTA_RUNNING(mp)))
		return -ESRCH;

	switch (type) {
	case XFS_DQ_USER:
		if (!XFS_IS_UQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	case XFS_DQ_GROUP:
		if (!XFS_IS_GQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	case XFS_DQ_PROJ:
		if (!XFS_IS_PQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	default:
		WARN_ON_ONCE(0);
		return -EINVAL;
	}
}

/*
 * Given the file system, id, and type (UDQUOT/GDQUOT), return a locked
 * dquot, doing an allocation (if requested) as needed.
 */
int
xfs_qm_dqget(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	bool			can_alloc,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

restart:
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
	if (error)
		return error;

	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
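
/*
 * Usage sketch (illustrative, with a made-up id): a caller that wants
 * the user dquot for id 500, allocating on-disk space if the chunk
 * doesn't exist yet:
 *
 *	struct xfs_dquot	*dqp;
 *	int			error;
 *
 *	error = xfs_qm_dqget(mp, 500, XFS_DQ_USER, true, &dqp);
 *	if (!error) {
 *		... dqp is returned locked with a reference held ...
 *		xfs_qm_dqput(dqp);	// unlock and drop the reference
 *	}
 */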

/*
 * Given a dquot id and type, read and initialize a dquot from the on-disk
 * metadata. This function is only for use during quota initialization so
 * it ignores the dquot cache assuming that the dquot shrinker isn't set up.
 * The caller is responsible for _qm_dqdestroy'ing the returned dquot.
 */
int
xfs_qm_dqget_uncached(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct xfs_dquot	**dqpp)
{
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

	return xfs_qm_dqread(mp, id, type, 0, dqpp);
}

/* Return the quota id for a given inode and type. */
xfs_dqid_t
xfs_qm_id_for_quotatype(
	struct xfs_inode	*ip,
	uint			type)
{
	switch (type) {
	case XFS_DQ_USER:
		return i_uid_read(VFS_I(ip));
	case XFS_DQ_GROUP:
		return i_gid_read(VFS_I(ip));
	case XFS_DQ_PROJ:
		return ip->i_d.di_projid;
	}
	ASSERT(0);
	return 0;
}

/*
 * Return the dquot for a given inode and type. If @can_alloc is true, then
 * allocate blocks if needed. The inode's ILOCK must be held and it must not
 * already have a dquot of this type attached.
 */
| 847 | int |
| 848 | xfs_qm_dqget_inode( |
| 849 | struct xfs_inode *ip, |
| 850 | uint type, |
| 851 | bool can_alloc, |
| 852 | struct xfs_dquot **O_dqpp) |
| 853 | { |
| 854 | struct xfs_mount *mp = ip->i_mount; |
| 855 | struct xfs_quotainfo *qi = mp->m_quotainfo; |
| 856 | struct radix_tree_root *tree = xfs_dquot_tree(qi, type); |
| 857 | struct xfs_dquot *dqp; |
| 858 | xfs_dqid_t id; |
Darrick J. Wong | 4882c19 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 859 | int error; |
| 860 | |
| 861 | error = xfs_qm_dqget_checks(mp, type); |
| 862 | if (error) |
| 863 | return error; |
| 864 | |
Darrick J. Wong | 4882c19 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 865 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
| 866 | ASSERT(xfs_inode_dquot(ip, type) == NULL); |
| 867 | |
| 868 | id = xfs_qm_id_for_quotatype(ip, type); |
| 869 | |
Christoph Hellwig | 9267855 | 2011-12-06 21:58:18 +0000 | [diff] [blame] | 870 | restart: |
Darrick J. Wong | cc2047c | 2018-05-04 15:30:20 -0700 | [diff] [blame] | 871 | dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id); |
Christoph Hellwig | 9f920f1 | 2012-03-13 08:52:35 +0000 | [diff] [blame] | 872 | if (dqp) { |
Christoph Hellwig | 9f920f1 | 2012-03-13 08:52:35 +0000 | [diff] [blame] | 873 | *O_dqpp = dqp; |
| 874 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 875 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 876 | |
| 877 | /* |
| 878 | * Dquot cache miss. We don't want to keep the inode lock across |
| 879 | * a (potential) disk read. Also we don't want to deal with the lock |
| 880 | * ordering between quotainode and this inode. OTOH, dropping the inode |
| 881 | * lock here means dealing with a chown that can happen before |
| 882 | * we re-acquire the lock. |
| 883 | */ |
Darrick J. Wong | 4882c19 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 884 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
Darrick J. Wong | 30ab2dc | 2018-05-04 15:30:24 -0700 | [diff] [blame] | 885 | error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp); |
Darrick J. Wong | 4882c19 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 886 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
Christoph Hellwig | 7ae4440 | 2011-12-06 21:58:25 +0000 | [diff] [blame] | 887 | if (error) |
| 888 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 889 | |
Darrick J. Wong | 4882c19 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 890 | /* |
| 891 | * A dquot could be attached to this inode by now, since we had |
| 892 | * dropped the ilock. |
| 893 | */ |
| 894 | if (xfs_this_quota_on(mp, type)) { |
| 895 | struct xfs_dquot *dqp1; |
Christoph Hellwig | 9f920f1 | 2012-03-13 08:52:35 +0000 | [diff] [blame] | 896 | |
Darrick J. Wong | 4882c19 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 897 | dqp1 = xfs_inode_dquot(ip, type); |
| 898 | if (dqp1) { |
Chandra Seetharaman | 3673141 | 2012-01-23 17:31:30 +0000 | [diff] [blame] | 899 | xfs_qm_dqdestroy(dqp); |
Darrick J. Wong | 4882c19 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 900 | dqp = dqp1; |
| 901 | xfs_dqlock(dqp); |
| 902 | goto dqret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 903 | } |
Darrick J. Wong | 4882c19 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 904 | } else { |
| 905 | /* inode stays locked on return */ |
| 906 | xfs_qm_dqdestroy(dqp); |
| 907 | return -ESRCH; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 908 | } |
| 909 | |
Darrick J. Wong | cc2047c | 2018-05-04 15:30:20 -0700 | [diff] [blame] | 910 | error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp); |
| 911 | if (error) { |
Christoph Hellwig | 9f920f1 | 2012-03-13 08:52:35 +0000 | [diff] [blame] | 912 | /* |
| 913 | * Duplicate found. Just throw away the new dquot and start |
| 914 | * over. |
| 915 | */ |
Christoph Hellwig | 9f920f1 | 2012-03-13 08:52:35 +0000 | [diff] [blame] | 916 | xfs_qm_dqdestroy(dqp); |
Bill O'Donnell | ff6d6af | 2015-10-12 18:21:22 +1100 | [diff] [blame] | 917 | XFS_STATS_INC(mp, xs_qm_dquot_dups); |
Christoph Hellwig | 9f920f1 | 2012-03-13 08:52:35 +0000 | [diff] [blame] | 918 | goto restart; |
| 919 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 920 | |
Darrick J. Wong | 4882c19 | 2018-05-04 15:30:22 -0700 | [diff] [blame] | 921 | dqret: |
| 922 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 923 | trace_xfs_dqget_miss(dqp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 924 | *O_dqpp = dqp; |
Eric Sandeen | d99831f | 2014-06-22 15:03:54 +1000 | [diff] [blame] | 925 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 926 | } |
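/*
 * A hedged caller-side sketch of the contract implemented above: the inode
 * must be ILOCK_EXCL-locked on entry, the lookup may cycle that lock around
 * the disk read, and the lock is held again on return.  The wrapper below is
 * hypothetical; only xfs_qm_dqget_inode() and the inode locking helpers are
 * taken from the real code.
 */
static int
example_lookup_user_dquot(
	struct xfs_inode	*ip,
	struct xfs_dquot	**dqpp)
{
	int			error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqget_inode(ip, XFS_DQ_USER, true, dqpp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/* on success, *dqpp is returned locked with one reference held */
	return error;
}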
| 927 | |
Christoph Hellwig | f8739c3 | 2012-03-13 08:52:34 +0000 | [diff] [blame] | 928 | /* |
Darrick J. Wong | 2e330e7 | 2018-05-04 15:30:20 -0700 | [diff] [blame] | 929 | * Starting at @id and progressing upwards, look for an initialized incore |
| 930 | * dquot, lock it, and return it. |
| 931 | */ |
| 932 | int |
| 933 | xfs_qm_dqget_next( |
| 934 | struct xfs_mount *mp, |
| 935 | xfs_dqid_t id, |
| 936 | uint type, |
| 937 | struct xfs_dquot **dqpp) |
| 938 | { |
| 939 | struct xfs_dquot *dqp; |
| 940 | int error = 0; |
| 941 | |
| 942 | *dqpp = NULL; |
| 943 | for (; !error; error = xfs_dq_get_next_id(mp, type, &id)) { |
Darrick J. Wong | 30ab2dc | 2018-05-04 15:30:24 -0700 | [diff] [blame] | 944 | error = xfs_qm_dqget(mp, id, type, false, &dqp); |
Darrick J. Wong | 2e330e7 | 2018-05-04 15:30:20 -0700 | [diff] [blame] | 945 | if (error == -ENOENT) |
| 946 | continue; |
| 947 | else if (error != 0) |
| 948 | break; |
| 949 | |
| 950 | if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) { |
| 951 | *dqpp = dqp; |
| 952 | return 0; |
| 953 | } |
| 954 | |
| 955 | xfs_qm_dqput(dqp); |
| 956 | } |
| 957 | |
| 958 | return error; |
| 959 | } |
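/*
 * A minimal sketch of driving xfs_qm_dqget_next() by hand; this is the same
 * loop shape that xfs_qm_dqiterate() at the end of this file wraps.  The
 * counting helper itself is hypothetical.
 */
static int
example_count_dquots(
	struct xfs_mount	*mp,
	uint			type,
	unsigned long		*count)
{
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id = 0;
	int			error;

	*count = 0;
	while ((error = xfs_qm_dqget_next(mp, id, type, &dqp)) == 0) {
		(*count)++;
		id = be32_to_cpu(dqp->q_core.d_id) + 1;
		xfs_qm_dqput(dqp);
		if (id == 0)	/* wrapped around the 32-bit id space */
			break;
	}
	return error == -ENOENT ? 0 : error;
}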
| 960 | |
| 961 | /* |
Christoph Hellwig | f8739c3 | 2012-03-13 08:52:34 +0000 | [diff] [blame] | 962 | * Release a reference to the dquot (decrement ref-count) and unlock it. |
| 963 | * |
| 964 | * If this drops the last reference, put the dquot on the LRU list so |
| 965 | * that it can be reclaimed later. |
| 966 | */ |
| 967 | void |
| 968 | xfs_qm_dqput( |
| 969 | struct xfs_dquot *dqp) |
| 970 | { |
| 971 | ASSERT(dqp->q_nrefs > 0); |
| 972 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
| 973 | |
| 974 | trace_xfs_dqput(dqp); |
| 975 | |
Dave Chinner | 3c3533757 | 2014-05-05 17:30:15 +1000 | [diff] [blame] | 976 | if (--dqp->q_nrefs == 0) { |
| 977 | struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo; |
| 978 | trace_xfs_dqput_free(dqp); |
| 979 | |
| 980 | if (list_lru_add(&qi->qi_lru, &dqp->q_lru)) |
Bill O'Donnell | ff6d6af | 2015-10-12 18:21:22 +1100 | [diff] [blame] | 981 | XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused); |
Dave Chinner | 3c3533757 | 2014-05-05 17:30:15 +1000 | [diff] [blame] | 982 | } |
| 983 | xfs_dqunlock(dqp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 984 | } |
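/*
 * A reference-lifetime sketch (the helper is hypothetical): every successful
 * xfs_qm_dqget*() call returns a locked dquot holding one reference, and each
 * reference is eventually dropped with xfs_qm_dqput(), which also unlocks.
 * Extra references are taken by bumping q_nrefs while holding the dquot lock,
 * the same way the cache lookup path does.
 */
static void
example_extra_reference(
	struct xfs_dquot	*dqp)
{
	/* take a second reference; the caller already holds one, locked */
	dqp->q_nrefs++;
	xfs_qm_dqput(dqp);		/* drop the caller's ref and unlock */

	/* ... use the dquot via our own reference ... */

	xfs_dqlock(dqp);
	xfs_qm_dqput(dqp);		/* a final put may park dqp on the LRU */
}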
| 985 | |
| 986 | /* |
| 987 | * Release a dquot. Flush it if dirty, then dqput() it. |
| 988 | * dquot must not be locked. |
| 989 | */ |
| 990 | void |
| 991 | xfs_qm_dqrele( |
Pavel Reichl | aefe69a | 2019-11-12 17:04:02 -0800 | [diff] [blame] | 992 | struct xfs_dquot *dqp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 993 | { |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 994 | if (!dqp) |
| 995 | return; |
| 996 | |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 997 | trace_xfs_dqrele(dqp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 998 | |
| 999 | xfs_dqlock(dqp); |
| 1000 | /* |
| 1001 | * Even if the dquot is dirty we don't flush it here; doing so |
| 1002 | * would create stutters that we want to avoid.  Instead we do a |
| 1003 | * delayed write when we try to reclaim a dirty dquot.  Also, |
| 1004 | * xfs_sync will take part of the burden... |
| 1005 | */ |
| 1006 | xfs_qm_dqput(dqp); |
| 1007 | } |
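/*
 * A hedged sketch of the usual caller pattern: xfs_qm_dqrele() tolerates a
 * NULL pointer, so detach code can release all of an inode's dquots
 * unconditionally.  The helper is hypothetical; the i_udquot/i_gdquot/
 * i_pdquot fields are assumed to match the inode dquot pointers this file
 * operates on.
 */
static void
example_release_inode_dquots(
	struct xfs_inode	*ip)
{
	xfs_qm_dqrele(ip->i_udquot);
	ip->i_udquot = NULL;
	xfs_qm_dqrele(ip->i_gdquot);
	ip->i_gdquot = NULL;
	xfs_qm_dqrele(ip->i_pdquot);
	ip->i_pdquot = NULL;
}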
| 1008 | |
Christoph Hellwig | ca30b2a | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 1009 | /* |
| 1010 | * This is the dquot flushing I/O completion routine. It is called |
| 1011 | * from interrupt level when the buffer containing the dquot is |
| 1012 | * flushed to disk. It is responsible for removing the dquot logitem |
| 1013 | * from the AIL if it has not been re-logged, and unlocking the dquot's |
| 1014 | * flush lock. This behavior is very similar to that of inodes. |
| 1015 | */ |
| 1016 | STATIC void |
| 1017 | xfs_qm_dqflush_done( |
| 1018 | struct xfs_buf *bp, |
| 1019 | struct xfs_log_item *lip) |
| 1020 | { |
Pavel Reichl | fd8b81d | 2019-11-12 17:04:26 -0800 | [diff] [blame] | 1021 | struct xfs_dq_logitem *qip = (struct xfs_dq_logitem *)lip; |
Pavel Reichl | aefe69a | 2019-11-12 17:04:02 -0800 | [diff] [blame] | 1022 | struct xfs_dquot *dqp = qip->qli_dquot; |
Christoph Hellwig | ca30b2a | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 1023 | struct xfs_ail *ailp = lip->li_ailp; |
| 1024 | |
| 1025 | /* |
| 1026 | * We only want to pull the item from the AIL if its location in |
| 1027 | * the log has not changed since we started the flush, i.e. if the |
| 1028 | * dquot's lsn is unchanged. |
| 1029 | * First check the lsn outside the lock, since that is cheaper, and |
| 1030 | * then recheck while holding the lock before removing the dquot |
| 1031 | * from the AIL. |
| 1032 | */ |
Dave Chinner | 22525c1 | 2018-05-09 07:47:34 -0700 | [diff] [blame] | 1033 | if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) && |
Carlos Maiolino | 373b058 | 2017-11-28 08:54:10 -0800 | [diff] [blame] | 1034 | ((lip->li_lsn == qip->qli_flush_lsn) || |
Dave Chinner | 22525c1 | 2018-05-09 07:47:34 -0700 | [diff] [blame] | 1035 | test_bit(XFS_LI_FAILED, &lip->li_flags))) { |
Christoph Hellwig | ca30b2a | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 1036 | |
| 1037 | /* xfs_trans_ail_delete() drops the AIL lock. */ |
Matthew Wilcox | 57e8095 | 2018-03-07 14:59:39 -0800 | [diff] [blame] | 1038 | spin_lock(&ailp->ail_lock); |
Carlos Maiolino | 373b058 | 2017-11-28 08:54:10 -0800 | [diff] [blame] | 1039 | if (lip->li_lsn == qip->qli_flush_lsn) { |
Dave Chinner | 04913fd | 2012-04-23 15:58:41 +1000 | [diff] [blame] | 1040 | xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE); |
Carlos Maiolino | 373b058 | 2017-11-28 08:54:10 -0800 | [diff] [blame] | 1041 | } else { |
| 1042 | /* |
| 1043 | * Clear the failed state since we are about to drop the |
| 1044 | * flush lock |
| 1045 | */ |
Dave Chinner | 22525c1 | 2018-05-09 07:47:34 -0700 | [diff] [blame] | 1046 | xfs_clear_li_failed(lip); |
Matthew Wilcox | 57e8095 | 2018-03-07 14:59:39 -0800 | [diff] [blame] | 1047 | spin_unlock(&ailp->ail_lock); |
Carlos Maiolino | 373b058 | 2017-11-28 08:54:10 -0800 | [diff] [blame] | 1048 | } |
Christoph Hellwig | ca30b2a | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 1049 | } |
| 1050 | |
| 1051 | /* |
| 1052 | * Release the dq's flush lock since we're done with it. |
| 1053 | */ |
| 1054 | xfs_dqfunlock(dqp); |
| 1055 | } |
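/*
 * The AIL removal above is a double-checked optimization: a cheap unlocked
 * peek at li_lsn first, then a recheck under ail_lock before committing to
 * the removal.  The skeleton below repeats that logic in isolation; the
 * function itself is hypothetical.
 */
static void
example_ail_remove_if_unchanged(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip,
	xfs_lsn_t		flush_lsn)
{
	if (lip->li_lsn != flush_lsn)	/* cheap, racy early-out */
		return;

	spin_lock(&ailp->ail_lock);
	if (lip->li_lsn == flush_lsn) {
		/* xfs_trans_ail_delete() drops ail_lock for us */
		xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
	} else {
		spin_unlock(&ailp->ail_lock);
	}
}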
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1056 | |
| 1057 | /* |
| 1058 | * Write a modified dquot to disk. |
| 1059 | * The caller must hold the dquot lock and must also hold the flush lock. |
| 1060 | * The flush lock will not be unlocked until the dquot reaches the disk, |
| 1061 | * but the dquot is free to be unlocked and modified by the caller |
| 1062 | * in the interim. Dquot is still locked on return. This behavior is |
| 1063 | * identical to that of inodes. |
| 1064 | */ |
| 1065 | int |
| 1066 | xfs_qm_dqflush( |
Christoph Hellwig | fe7257f | 2012-04-23 15:58:37 +1000 | [diff] [blame] | 1067 | struct xfs_dquot *dqp, |
| 1068 | struct xfs_buf **bpp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1069 | { |
Christoph Hellwig | acecf1b | 2010-09-06 01:44:45 +0000 | [diff] [blame] | 1070 | struct xfs_mount *mp = dqp->q_mount; |
| 1071 | struct xfs_buf *bp; |
Eric Sandeen | 7224fa4 | 2018-05-07 09:20:18 -0700 | [diff] [blame] | 1072 | struct xfs_dqblk *dqb; |
Christoph Hellwig | acecf1b | 2010-09-06 01:44:45 +0000 | [diff] [blame] | 1073 | struct xfs_disk_dquot *ddqp; |
Darrick J. Wong | eebf3ca | 2018-01-08 10:51:25 -0800 | [diff] [blame] | 1074 | xfs_failaddr_t fa; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1075 | int error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1076 | |
| 1077 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
David Chinner | e1f49cf | 2008-08-13 16:41:43 +1000 | [diff] [blame] | 1078 | ASSERT(!completion_done(&dqp->q_flush)); |
Christoph Hellwig | acecf1b | 2010-09-06 01:44:45 +0000 | [diff] [blame] | 1079 | |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 1080 | trace_xfs_dqflush(dqp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1081 | |
Christoph Hellwig | fe7257f | 2012-04-23 15:58:37 +1000 | [diff] [blame] | 1082 | *bpp = NULL; |
| 1083 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1084 | xfs_qm_dqunpin_wait(dqp); |
| 1085 | |
| 1086 | /* |
| 1087 | * This may have been unpinned because the filesystem is shutting |
| 1088 | * down forcibly. If that's the case we must not write this dquot |
Christoph Hellwig | dea9609 | 2012-04-23 15:58:31 +1000 | [diff] [blame] | 1089 | * to disk, because the log record didn't make it to disk. |
| 1090 | * |
| 1091 | * We also have to remove the log item from the AIL in this case, |
| 1092 | * as we wait for an empty AIL as part of the unmount process. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1093 | */ |
Christoph Hellwig | acecf1b | 2010-09-06 01:44:45 +0000 | [diff] [blame] | 1094 | if (XFS_FORCED_SHUTDOWN(mp)) { |
Christoph Hellwig | dea9609 | 2012-04-23 15:58:31 +1000 | [diff] [blame] | 1095 | struct xfs_log_item *lip = &dqp->q_logitem.qli_item; |
Christoph Hellwig | acecf1b | 2010-09-06 01:44:45 +0000 | [diff] [blame] | 1096 | dqp->dq_flags &= ~XFS_DQ_DIRTY; |
Christoph Hellwig | dea9609 | 2012-04-23 15:58:31 +1000 | [diff] [blame] | 1097 | |
Brian Foster | 146e54b | 2015-08-19 10:01:08 +1000 | [diff] [blame] | 1098 | xfs_trans_ail_remove(lip, SHUTDOWN_CORRUPT_INCORE); |
| 1099 | |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1100 | error = -EIO; |
Christoph Hellwig | fe7257f | 2012-04-23 15:58:37 +1000 | [diff] [blame] | 1101 | goto out_unlock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1102 | } |
| 1103 | |
| 1104 | /* |
| 1105 | * Get the buffer containing the on-disk dquot |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1106 | */ |
Christoph Hellwig | acecf1b | 2010-09-06 01:44:45 +0000 | [diff] [blame] | 1107 | error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno, |
Brian Foster | 8d3d7e2 | 2020-03-27 08:29:45 -0700 | [diff] [blame] | 1108 | mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK, |
| 1109 | &bp, &xfs_dquot_buf_ops); |
Christoph Hellwig | fe7257f | 2012-04-23 15:58:37 +1000 | [diff] [blame] | 1110 | if (error) |
| 1111 | goto out_unlock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1112 | |
Christoph Hellwig | acecf1b | 2010-09-06 01:44:45 +0000 | [diff] [blame] | 1113 | /* |
| 1114 | * Calculate the location of the dquot inside the buffer. |
| 1115 | */ |
Eric Sandeen | 7224fa4 | 2018-05-07 09:20:18 -0700 | [diff] [blame] | 1116 | dqb = bp->b_addr + dqp->q_bufoffset; |
| 1117 | ddqp = &dqb->dd_diskdq; |
Christoph Hellwig | acecf1b | 2010-09-06 01:44:45 +0000 | [diff] [blame] | 1118 | |
| 1119 | /* |
Eric Sandeen | 7224fa4 | 2018-05-07 09:20:18 -0700 | [diff] [blame] | 1120 | * A simple sanity check in case we got a corrupted dquot. |
Christoph Hellwig | acecf1b | 2010-09-06 01:44:45 +0000 | [diff] [blame] | 1121 | */ |
Eric Sandeen | 7224fa4 | 2018-05-07 09:20:18 -0700 | [diff] [blame] | 1122 | fa = xfs_dqblk_verify(mp, dqb, be32_to_cpu(ddqp->d_id), 0); |
Darrick J. Wong | eebf3ca | 2018-01-08 10:51:25 -0800 | [diff] [blame] | 1123 | if (fa) { |
| 1124 | xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS", |
| 1125 | be32_to_cpu(ddqp->d_id), fa); |
Christoph Hellwig | acecf1b | 2010-09-06 01:44:45 +0000 | [diff] [blame] | 1126 | xfs_buf_relse(bp); |
| 1127 | xfs_dqfunlock(dqp); |
| 1128 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); |
Darrick J. Wong | c2414ad | 2019-10-28 16:12:34 -0700 | [diff] [blame] | 1129 | return -EFSCORRUPTED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1130 | } |
| 1131 | |
| 1132 | /* This is the only portion of data that needs to persist */ |
Pavel Reichl | aefe69a | 2019-11-12 17:04:02 -0800 | [diff] [blame] | 1133 | memcpy(ddqp, &dqp->q_core, sizeof(struct xfs_disk_dquot)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1134 | |
| 1135 | /* |
| 1136 | * Clear the dirty field and remember the flush lsn for later use. |
| 1137 | */ |
Christoph Hellwig | acecf1b | 2010-09-06 01:44:45 +0000 | [diff] [blame] | 1138 | dqp->dq_flags &= ~XFS_DQ_DIRTY; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1139 | |
David Chinner | 7b2e2a3 | 2008-10-30 17:39:12 +1100 | [diff] [blame] | 1140 | xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn, |
| 1141 | &dqp->q_logitem.qli_item.li_lsn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1142 | |
| 1143 | /* |
Christoph Hellwig | 3fe58f3 | 2013-04-03 16:11:16 +1100 | [diff] [blame] | 1144 | * Copy the lsn into the on-disk dquot now while we have the in-memory |
| 1145 | * dquot here. This can't be done later in the write verifier as we |
| 1146 | * can't get access to the log item at that point in time. |
Dave Chinner | 6fcdc59 | 2013-06-03 15:28:46 +1000 | [diff] [blame] | 1147 | * |
| 1148 | * We also calculate the CRC here so that the on-disk dquot in the |
| 1149 | * buffer always has a valid CRC. This ensures there is no possibility |
| 1150 | * of a dquot without an up-to-date CRC getting to disk. |
Christoph Hellwig | 3fe58f3 | 2013-04-03 16:11:16 +1100 | [diff] [blame] | 1151 | */ |
| 1152 | if (xfs_sb_version_hascrc(&mp->m_sb)) { |
Christoph Hellwig | 3fe58f3 | 2013-04-03 16:11:16 +1100 | [diff] [blame] | 1153 | dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn); |
Dave Chinner | 6fcdc59 | 2013-06-03 15:28:46 +1000 | [diff] [blame] | 1154 | xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk), |
| 1155 | XFS_DQUOT_CRC_OFF); |
Christoph Hellwig | 3fe58f3 | 2013-04-03 16:11:16 +1100 | [diff] [blame] | 1156 | } |
| 1157 | |
| 1158 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1159 | * Attach an iodone routine so that we can remove this dquot from the |
| 1160 | * AIL and release the flush lock once the dquot is synced to disk. |
| 1161 | */ |
Christoph Hellwig | ca30b2a | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 1162 | xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done, |
| 1163 | &dqp->q_logitem.qli_item); |
| 1164 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1165 | /* |
| 1166 | * If the buffer is pinned then push on the log so we won't |
| 1167 | * get stuck waiting in the write for too long. |
| 1168 | */ |
Chandra Seetharaman | 811e64c | 2011-07-22 23:40:27 +0000 | [diff] [blame] | 1169 | if (xfs_buf_ispinned(bp)) { |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 1170 | trace_xfs_dqflush_force(dqp); |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 1171 | xfs_log_force(mp, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1172 | } |
| 1173 | |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 1174 | trace_xfs_dqflush_done(dqp); |
Christoph Hellwig | fe7257f | 2012-04-23 15:58:37 +1000 | [diff] [blame] | 1175 | *bpp = bp; |
| 1176 | return 0; |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 1177 | |
Christoph Hellwig | fe7257f | 2012-04-23 15:58:37 +1000 | [diff] [blame] | 1178 | out_unlock: |
| 1179 | xfs_dqfunlock(dqp); |
Brian Foster | 8d3d7e2 | 2020-03-27 08:29:45 -0700 | [diff] [blame] | 1180 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1181 | } |
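/*
 * A caller-side sketch, modeled on how the reclaim and quotacheck paths are
 * expected to use xfs_qm_dqflush(): take the flush lock, flush the dquot into
 * a buffer, queue the buffer onto a caller-owned delayed-write list, and
 * release it.  The wrapper is hypothetical; xfs_dqflock_nowait() and
 * xfs_buf_delwri_queue() are assumed to be available as in the rest of XFS.
 */
static int
example_flush_dquot(
	struct xfs_dquot	*dqp,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp = NULL;
	int			error;

	xfs_dqlock(dqp);
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		return -EAGAIN;		/* someone else holds the flush lock */
	}

	error = xfs_qm_dqflush(dqp, &bp);
	if (!error) {
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
	}
	xfs_dqunlock(dqp);
	return error;
}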
| 1182 | |
Christoph Hellwig | 5bb87a3 | 2009-01-19 02:03:19 +0100 | [diff] [blame] | 1183 | /* |
| 1184 | * Lock two xfs_dquot structures. |
| 1185 | * |
| 1186 | * To avoid deadlocks we always lock the quota structure with |
| 1187 | * the lower id first. |
| 1188 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1189 | void |
| 1190 | xfs_dqlock2( |
Pavel Reichl | aefe69a | 2019-11-12 17:04:02 -0800 | [diff] [blame] | 1191 | struct xfs_dquot *d1, |
| 1192 | struct xfs_dquot *d2) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1193 | { |
| 1194 | if (d1 && d2) { |
| 1195 | ASSERT(d1 != d2); |
Christoph Hellwig | 1149d96 | 2005-11-02 15:01:12 +1100 | [diff] [blame] | 1196 | if (be32_to_cpu(d1->q_core.d_id) > |
| 1197 | be32_to_cpu(d2->q_core.d_id)) { |
Christoph Hellwig | 5bb87a3 | 2009-01-19 02:03:19 +0100 | [diff] [blame] | 1198 | mutex_lock(&d2->q_qlock); |
| 1199 | mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1200 | } else { |
Christoph Hellwig | 5bb87a3 | 2009-01-19 02:03:19 +0100 | [diff] [blame] | 1201 | mutex_lock(&d1->q_qlock); |
| 1202 | mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1203 | } |
Christoph Hellwig | 5bb87a3 | 2009-01-19 02:03:19 +0100 | [diff] [blame] | 1204 | } else if (d1) { |
| 1205 | mutex_lock(&d1->q_qlock); |
| 1206 | } else if (d2) { |
| 1207 | mutex_lock(&d2->q_qlock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1208 | } |
| 1209 | } |
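/*
 * Typical use of xfs_dqlock2(), e.g. when moving usage between two dquots:
 * lock both in the stable order chosen above, do the work, then unlock in
 * any order.  The transfer helper is hypothetical.
 */
static void
example_transfer(
	struct xfs_dquot	*from,
	struct xfs_dquot	*to)
{
	xfs_dqlock2(from, to);

	/* ... adjust block/inode counts on both dquots ... */

	xfs_dqunlock(from);
	xfs_dqunlock(to);
}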
| 1210 | |
Christoph Hellwig | a05931c | 2012-03-13 08:52:37 +0000 | [diff] [blame] | 1211 | int __init |
| 1212 | xfs_qm_init(void) |
| 1213 | { |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 1214 | xfs_qm_dqzone = kmem_cache_create("xfs_dquot", |
| 1215 | sizeof(struct xfs_dquot), |
| 1216 | 0, 0, NULL); |
Christoph Hellwig | a05931c | 2012-03-13 08:52:37 +0000 | [diff] [blame] | 1217 | if (!xfs_qm_dqzone) |
| 1218 | goto out; |
| 1219 | |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 1220 | xfs_qm_dqtrxzone = kmem_cache_create("xfs_dqtrx", |
| 1221 | sizeof(struct xfs_dquot_acct), |
| 1222 | 0, 0, NULL); |
Christoph Hellwig | a05931c | 2012-03-13 08:52:37 +0000 | [diff] [blame] | 1223 | if (!xfs_qm_dqtrxzone) |
| 1224 | goto out_free_dqzone; |
| 1225 | |
| 1226 | return 0; |
| 1227 | |
| 1228 | out_free_dqzone: |
Carlos Maiolino | aaf54eb | 2019-11-14 12:43:04 -0800 | [diff] [blame] | 1229 | kmem_cache_destroy(xfs_qm_dqzone); |
Christoph Hellwig | a05931c | 2012-03-13 08:52:37 +0000 | [diff] [blame] | 1230 | out: |
| 1231 | return -ENOMEM; |
| 1232 | } |
| 1233 | |
Gerard Snitselaar | 1c2ccc6 | 2012-03-16 18:36:18 +0000 | [diff] [blame] | 1234 | void |
Christoph Hellwig | a05931c | 2012-03-13 08:52:37 +0000 | [diff] [blame] | 1235 | xfs_qm_exit(void) |
| 1236 | { |
Carlos Maiolino | aaf54eb | 2019-11-14 12:43:04 -0800 | [diff] [blame] | 1237 | kmem_cache_destroy(xfs_qm_dqtrxzone); |
| 1238 | kmem_cache_destroy(xfs_qm_dqzone); |
Christoph Hellwig | a05931c | 2012-03-13 08:52:37 +0000 | [diff] [blame] | 1239 | } |
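/*
 * A sketch of the intended pairing (the module hooks here are hypothetical;
 * in the kernel the real caller is the XFS module init path): xfs_qm_init()
 * runs once at module load, and xfs_qm_exit() runs on the unload or
 * init-failure path, so both slab caches are torn down exactly once.
 */
static int __init
example_module_init(void)
{
	int	error;

	error = xfs_qm_init();
	if (error)
		return error;

	/* ... register the rest of the subsystem ... */
	return 0;
}

static void __exit
example_module_exit(void)
{
	xfs_qm_exit();
}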
Darrick J. Wong | 554ba96 | 2018-05-04 15:31:21 -0700 | [diff] [blame] | 1240 | |
| 1241 | /* |
| 1242 | * Iterate every dquot of a particular type. The caller must ensure that the |
| 1243 | * particular quota type is active. iter_fn can return negative error codes, |
Darrick J. Wong | e7ee96d | 2019-08-28 14:37:57 -0700 | [diff] [blame] | 1244 | * or -ECANCELED to indicate that it wants to stop iterating. |
Darrick J. Wong | 554ba96 | 2018-05-04 15:31:21 -0700 | [diff] [blame] | 1245 | */ |
| 1246 | int |
| 1247 | xfs_qm_dqiterate( |
| 1248 | struct xfs_mount *mp, |
| 1249 | uint dqtype, |
| 1250 | xfs_qm_dqiterate_fn iter_fn, |
| 1251 | void *priv) |
| 1252 | { |
| 1253 | struct xfs_dquot *dq; |
| 1254 | xfs_dqid_t id = 0; |
| 1255 | int error; |
| 1256 | |
| 1257 | do { |
| 1258 | error = xfs_qm_dqget_next(mp, id, dqtype, &dq); |
| 1259 | if (error == -ENOENT) |
| 1260 | return 0; |
| 1261 | if (error) |
| 1262 | return error; |
| 1263 | |
| 1264 | error = iter_fn(dq, dqtype, priv); |
| 1265 | id = be32_to_cpu(dq->q_core.d_id); |
| 1266 | xfs_qm_dqput(dq); |
| 1267 | id++; |
| 1268 | } while (error == 0 && id != 0); |
| 1269 | |
| 1270 | return error; |
| 1271 | } |
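/*
 * A minimal xfs_qm_dqiterate_fn sketch: count dquots of the given type and
 * stop early with -ECANCELED once a caller-supplied limit is reached.  The
 * callback and its priv structure are hypothetical.
 */
struct example_count_args {
	unsigned long	seen;
	unsigned long	limit;
};

static int
example_count_fn(
	struct xfs_dquot	*dq,
	uint			dqtype,
	void			*priv)
{
	struct example_count_args	*args = priv;

	if (++args->seen >= args->limit)
		return -ECANCELED;	/* stop iterating; not an error */
	return 0;
}

/* invoked as: xfs_qm_dqiterate(mp, XFS_DQ_USER, example_count_fn, &args) */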