// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *       qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */

struct kmem_zone		*xfs_qm_dqtrxzone;
static struct kmem_zone		*xfs_qm_dqzone;

static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	struct xfs_dquot	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
	mutex_destroy(&dqp->q_qlock);

	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
	kmem_cache_free(xfs_qm_dqzone, dqp);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
	struct xfs_dquot	*dq)
{
	struct xfs_mount	*mp = dq->q_mount;
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	int			prealloc = 0;

	ASSERT(dq->q_id);
	defq = xfs_get_defquota(q, xfs_dquot_type(dq));

	if (!dq->q_blk.softlimit) {
		dq->q_blk.softlimit = defq->blk.soft;
		prealloc = 1;
	}
	if (!dq->q_blk.hardlimit) {
		dq->q_blk.hardlimit = defq->blk.hard;
		prealloc = 1;
	}
	if (!dq->q_ino.softlimit)
		dq->q_ino.softlimit = defq->ino.soft;
	if (!dq->q_ino.hardlimit)
		dq->q_ino.hardlimit = defq->ino.hard;
	if (!dq->q_rtb.softlimit)
		dq->q_rtb.softlimit = defq->rtb.soft;
	if (!dq->q_rtb.hardlimit)
		dq->q_rtb.hardlimit = defq->rtb.hard;

	if (prealloc)
		xfs_dquot_set_prealloc_limits(dq);
}

/* Set the expiration time of a quota's grace period. */
time64_t
xfs_dquot_set_timeout(
	struct xfs_mount	*mp,
	time64_t		timeout)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	return clamp_t(time64_t, timeout, qi->qi_expiry_min,
					  qi->qi_expiry_max);
}
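
/*
 * Example of the clamp above (hypothetical values, not defaults taken
 * from this code): if qi_expiry_min were 60 seconds and qi_expiry_max
 * were 30 days, then xfs_dquot_set_timeout(mp, 5) would return 60 and
 * xfs_dquot_set_timeout(mp, 86400) would return 86400 unchanged, so
 * callers may pass any computed expiry and rely on the clamping here.
 */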

/* Set the length of the default grace period. */
time64_t
xfs_dquot_set_grace_period(
	time64_t		grace)
{
	return clamp_t(time64_t, grace, XFS_DQ_GRACE_MIN, XFS_DQ_GRACE_MAX);
}

/*
 * Determine if this quota counter is over either limit and set the quota
 * timers as appropriate.
 */
static inline void
xfs_qm_adjust_res_timer(
	struct xfs_mount	*mp,
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim)
{
	ASSERT(res->hardlimit == 0 || res->softlimit <= res->hardlimit);

	if ((res->softlimit && res->count > res->softlimit) ||
	    (res->hardlimit && res->count > res->hardlimit)) {
		if (res->timer == 0)
			res->timer = xfs_dquot_set_timeout(mp,
					ktime_get_real_seconds() + qlim->time);
	} else {
		if (res->timer == 0)
			res->warnings = 0;
		else
			res->timer = 0;
	}
}

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated. (We just don't reject any quota
 * reservations in that case, when enforcement is off).
 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 * enforcement's off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.  They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
	struct xfs_dquot	*dq)
{
	struct xfs_mount	*mp = dq->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct xfs_def_quota	*defq;

	ASSERT(dq->q_id);
	defq = xfs_get_defquota(qi, xfs_dquot_type(dq));

	xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_blk, &defq->blk);
	xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_ino, &defq->ino);
	xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_rtb, &defq->rtb);
}

/*
 * Initialize a buffer full of dquots and log the whole thing.
 */
STATIC void
xfs_qm_init_dquot_blk(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_buf		*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_dqblk	*d;
	xfs_dqid_t		curid;
	unsigned int		qflag;
	unsigned int		blftype;
	int			i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	switch (type) {
	case XFS_DQTYPE_USER:
		qflag = XFS_UQUOTA_CHKD;
		blftype = XFS_BLF_UDQUOT_BUF;
		break;
	case XFS_DQTYPE_PROJ:
		qflag = XFS_PQUOTA_CHKD;
		blftype = XFS_BLF_PDQUOT_BUF;
		break;
	case XFS_DQTYPE_GROUP:
		qflag = XFS_GQUOTA_CHKD;
		blftype = XFS_BLF_GDQUOT_BUF;
		break;
	default:
		ASSERT(0);
		return;
	}

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - id's are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_type = type;
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp, blftype);

	/*
	 * quotacheck uses delayed writes to update all the dquots on disk in an
	 * efficient manner instead of logging the individual dquot changes as
	 * they are made. However if we log the buffer allocated here and crash
	 * after quotacheck while the logged initialisation is still in the
	 * active region of the log, log recovery can replay the dquot buffer
	 * initialisation over the top of the checked dquots and corrupt quota
	 * accounting.
	 *
	 * To avoid this problem, quotacheck cannot log the initialised buffer.
	 * We must still dirty the buffer and write it back before the
	 * allocation transaction clears the log. Therefore, mark the buffer as
	 * ordered instead of logging it directly. This is safe for quotacheck
	 * because it detects and repairs allocated but uninitialized dquot
	 * blocks in the quota inodes.
	 */
	if (!(mp->m_qflags & qflag))
		xfs_trans_ordered_buf(tp, bp);
	else
		xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
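
/*
 * Worked example of the chunk math above, assuming a hypothetical
 * qi_dqperchunk of 30: id 37 rounds down to curid 30, so the buffer is
 * initialized with dquots for ids 30..59, and the dquot for id 37 is
 * the eighth entry in the chunk.
 */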

/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft limit
 * is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	uint64_t space;

	dqp->q_prealloc_hi_wmark = dqp->q_blk.hardlimit;
	dqp->q_prealloc_lo_wmark = dqp->q_blk.softlimit;
	if (!dqp->q_prealloc_lo_wmark) {
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);
		dqp->q_prealloc_lo_wmark *= 95;
	}

	space = dqp->q_prealloc_hi_wmark;

	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
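
/*
 * Worked example with hypothetical limits: for q_blk.hardlimit == 1000
 * blocks and no soft limit, the code above produces
 *
 *	q_prealloc_hi_wmark = 1000
 *	q_prealloc_lo_wmark = 950	(95% of the hard limit)
 *	q_low_space[XFS_QLOWSP_1_PCNT] = 10
 *	q_low_space[XFS_QLOWSP_3_PCNT] = 30
 *	q_low_space[XFS_QLOWSP_5_PCNT] = 50
 */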

/*
 * Ensure that the given in-core dquot has a buffer on disk backing it, and
 * return the buffer locked and held. This is called when the bmapi finds a
 * hole.
 */
STATIC int
xfs_dquot_disk_alloc(
	struct xfs_trans	**tpp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_trans	*tp = *tpp;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_buf		*bp;
	xfs_dqtype_t		qtype = xfs_dquot_type(dqp);
	struct xfs_inode	*quotip = xfs_quota_inode(mp, qtype);
	int			nmaps = 1;
	int			error;

	trace_xfs_dqalloc(dqp);

	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	if (!xfs_this_quota_on(dqp->q_mount, qtype)) {
		/*
		 * Return if this quota type was turned off while we didn't
		 * hold the inode lock.
		 */
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return -ESRCH;
	}

	/* Create the block mapping. */
	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
			&nmaps);
	if (error)
		return error;
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp);
	if (error)
		return error;
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, dqp->q_id, qtype, bp);
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * Hold the buffer and join it to the dfops so that we'll still own
	 * the buffer when we return to the caller.  The buffer disposal on
	 * error must be paid attention to very carefully, as it has been
	 * broken since commit efa092f3d4c6 "[XFS] Fixes a bug in the quota
	 * code when allocating a new dquot record" in 2005, and the later
	 * conversion to xfs_defer_ops in commit 310a75a3c6c747 failed to keep
	 * the buffer locked across the _defer_finish call.  We can now do
	 * this correctly with xfs_defer_bjoin.
	 *
	 * Above, we allocated a disk block for the dquot information and used
	 * get_buf to initialize the dquot. If the _defer_finish fails, the old
	 * transaction is gone but the new buffer is not joined or held to any
	 * transaction, so we must _buf_relse it.
	 *
	 * If everything succeeds, the caller of this function is returned a
	 * buffer that is locked and held to the transaction.  The caller
	 * is responsible for unlocking any buffer passed back, either
	 * manually or by committing the transaction.  On error, the buffer is
	 * released and not passed back.
	 */
	xfs_trans_bhold(tp, bp);
	error = xfs_defer_finish(tpp);
	if (error) {
		xfs_trans_bhold_release(*tpp, bp);
		xfs_trans_brelse(*tpp, bp);
		return error;
	}
	*bpp = bp;
	return 0;
}

/*
 * Read in the in-core dquot's on-disk metadata and return the buffer.
 * Returns ENOENT to signal a hole.
 */
STATIC int
xfs_dquot_disk_read(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_buf		*bp;
	xfs_dqtype_t		qtype = xfs_dquot_type(dqp);
	struct xfs_inode	*quotip = xfs_quota_inode(mp, qtype);
	uint			lock_mode;
	int			nmaps = 1;
	int			error;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(mp, qtype)) {
		/*
		 * Return if this quota type was turned off while we
		 * didn't hold the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount >= 1);
	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK)
		return -ENOENT;

	trace_xfs_dqtobp_read(dqp);

	/*
	 * store the blkno etc so that we don't have to do the
	 * mapping all the time
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			&xfs_dquot_buf_ops);
	if (error) {
		ASSERT(bp == NULL);
		return error;
	}

	ASSERT(xfs_buf_islocked(bp));
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
	*bpp = bp;

	return 0;
}

/* Allocate and initialize everything we need for an incore dquot. */
STATIC struct xfs_dquot *
xfs_dquot_alloc(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dquot	*dqp;

	dqp = kmem_cache_zalloc(xfs_qm_dqzone, GFP_KERNEL | __GFP_NOFAIL);

	dqp->q_type = type;
	dqp->q_id = id;
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);
	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
			sizeof(xfs_dqblk_t);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQTYPE_USER:
		/* uses the default lock class */
		break;
	case XFS_DQTYPE_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQTYPE_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	xfs_qm_dquot_logitem_init(dqp);

	XFS_STATS_INC(mp, xs_qm_dquot);
	return dqp;
}

/* Copy the in-core quota fields in from the on-disk buffer. */
STATIC int
xfs_dquot_from_disk(
	struct xfs_dquot	*dqp,
	struct xfs_buf		*bp)
{
	struct xfs_disk_dquot	*ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * Ensure that we got the type and ID we were looking for.
	 * Everything else was checked by the dquot buffer verifier.
	 */
	if ((ddqp->d_type & XFS_DQTYPE_REC_MASK) != xfs_dquot_type(dqp) ||
	    be32_to_cpu(ddqp->d_id) != dqp->q_id) {
		xfs_alert_tag(bp->b_mount, XFS_PTAG_VERIFIER_ERROR,
			  "Metadata corruption detected at %pS, quota %u",
			  __this_address, dqp->q_id);
		xfs_alert(bp->b_mount, "Unmount and run xfs_repair");
		return -EFSCORRUPTED;
	}

	/* copy everything from disk dquot to the incore dquot */
	dqp->q_type = ddqp->d_type;
	dqp->q_blk.hardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
	dqp->q_blk.softlimit = be64_to_cpu(ddqp->d_blk_softlimit);
	dqp->q_ino.hardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
	dqp->q_ino.softlimit = be64_to_cpu(ddqp->d_ino_softlimit);
	dqp->q_rtb.hardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
	dqp->q_rtb.softlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

	dqp->q_blk.count = be64_to_cpu(ddqp->d_bcount);
	dqp->q_ino.count = be64_to_cpu(ddqp->d_icount);
	dqp->q_rtb.count = be64_to_cpu(ddqp->d_rtbcount);

	dqp->q_blk.warnings = be16_to_cpu(ddqp->d_bwarns);
	dqp->q_ino.warnings = be16_to_cpu(ddqp->d_iwarns);
	dqp->q_rtb.warnings = be16_to_cpu(ddqp->d_rtbwarns);

	dqp->q_blk.timer = be32_to_cpu(ddqp->d_btimer);
	dqp->q_ino.timer = be32_to_cpu(ddqp->d_itimer);
	dqp->q_rtb.timer = be32_to_cpu(ddqp->d_rtbtimer);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_blk.reserved = dqp->q_blk.count;
	dqp->q_ino.reserved = dqp->q_ino.count;
	dqp->q_rtb.reserved = dqp->q_rtb.count;

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);
	return 0;
}

/* Copy the in-core quota fields into the on-disk buffer. */
void
xfs_dquot_to_disk(
	struct xfs_disk_dquot	*ddqp,
	struct xfs_dquot	*dqp)
{
	ddqp->d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	ddqp->d_version = XFS_DQUOT_VERSION;
	ddqp->d_type = dqp->q_type;
	ddqp->d_id = cpu_to_be32(dqp->q_id);
	ddqp->d_pad0 = 0;
	ddqp->d_pad = 0;

	ddqp->d_blk_hardlimit = cpu_to_be64(dqp->q_blk.hardlimit);
	ddqp->d_blk_softlimit = cpu_to_be64(dqp->q_blk.softlimit);
	ddqp->d_ino_hardlimit = cpu_to_be64(dqp->q_ino.hardlimit);
	ddqp->d_ino_softlimit = cpu_to_be64(dqp->q_ino.softlimit);
	ddqp->d_rtb_hardlimit = cpu_to_be64(dqp->q_rtb.hardlimit);
	ddqp->d_rtb_softlimit = cpu_to_be64(dqp->q_rtb.softlimit);

	ddqp->d_bcount = cpu_to_be64(dqp->q_blk.count);
	ddqp->d_icount = cpu_to_be64(dqp->q_ino.count);
	ddqp->d_rtbcount = cpu_to_be64(dqp->q_rtb.count);

	ddqp->d_bwarns = cpu_to_be16(dqp->q_blk.warnings);
	ddqp->d_iwarns = cpu_to_be16(dqp->q_ino.warnings);
	ddqp->d_rtbwarns = cpu_to_be16(dqp->q_rtb.warnings);

	ddqp->d_btimer = cpu_to_be32(dqp->q_blk.timer);
	ddqp->d_itimer = cpu_to_be32(dqp->q_ino.timer);
	ddqp->d_rtbtimer = cpu_to_be32(dqp->q_rtb.timer);
}

/* Allocate and initialize the dquot buffer for this in-core dquot. */
static int
xfs_qm_dqread_alloc(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
			XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
	if (error)
		goto err;

	error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
	if (error)
		goto err_cancel;

	error = xfs_trans_commit(tp);
	if (error) {
		/*
		 * Buffer was held to the transaction, so we have to unlock it
		 * manually here because we're not passing it back.
		 */
		xfs_buf_relse(*bpp);
		*bpp = NULL;
		goto err;
	}
	return 0;

err_cancel:
	xfs_trans_cancel(tp);
err:
	return error;
}

/*
 * Read in the ondisk dquot, copy it to an incore version, and release the
 * buffer immediately.  If @can_alloc is true, fill any holes in the
 * on-disk metadata.
 */
static int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_buf		*bp;
	int			error;

	dqp = xfs_dquot_alloc(mp, id, type);
	trace_xfs_dqread(dqp);

	/* Try to read the buffer, allocating if necessary. */
	error = xfs_dquot_disk_read(mp, dqp, &bp);
	if (error == -ENOENT && can_alloc)
		error = xfs_qm_dqread_alloc(mp, dqp, &bp);
	if (error)
		goto err;

	/*
	 * At this point we should have a clean locked buffer.  Copy the data
	 * to the incore dquot and release the buffer since the incore dquot
	 * has its own locking protocol so we needn't tie up the buffer any
	 * further.
	 */
	ASSERT(xfs_buf_islocked(bp));
	error = xfs_dquot_from_disk(dqp, bp);
	xfs_buf_relse(bp);
	if (error)
		goto err;

	*dqpp = dqp;
	return error;

err:
	trace_xfs_dqread_fail(dqp);
	xfs_qm_dqdestroy(dqp);
	*dqpp = NULL;
	return error;
}

/*
 * Advance to the next id in the current chunk, or if at the
 * end of the chunk, skip ahead to first id in next allocated chunk
 * using the SEEK_DATA interface.
 */
static int
xfs_dq_get_next_id(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	xfs_dqid_t		*id)
{
	struct xfs_inode	*quotip = xfs_quota_inode(mp, type);
	xfs_dqid_t		next_id = *id + 1; /* simple advance */
	uint			lock_flags;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	cur;
	xfs_fsblock_t		start;
	int			error = 0;

	/* If we'd wrap past the max ID, stop */
	if (next_id < *id)
		return -ENOENT;

	/* If new ID is within the current chunk, advancing it sufficed */
	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
		*id = next_id;
		return 0;
	}

	/* Nope, next_id is now past the current chunk, so find the next one */
	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;

	lock_flags = xfs_ilock_data_map_shared(quotip);
	if (!(quotip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
		if (error)
			return error;
	}

	if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
		/* contiguous chunk, bump startoff for the id calculation */
		if (got.br_startoff < start)
			got.br_startoff = start;
		*id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
	} else {
		error = -ENOENT;
	}

	xfs_iunlock(quotip, lock_flags);

	return error;
}
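
/*
 * Worked example, assuming a hypothetical qi_dqperchunk of 30: advancing
 * from *id == 27 yields next_id == 28, which is still inside the current
 * chunk, so it is returned directly.  Advancing from *id == 29 yields
 * next_id == 30, a chunk boundary, so we look up the extent map at file
 * offset 30 / 30 == 1; if the next allocated extent starts at offset 4,
 * *id becomes 4 * 30 == 120.
 */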

/*
 * Look up the dquot in the in-core cache.  If found, the dquot is returned
 * locked and ready to go.
 */
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id)
{
	struct xfs_dquot	*dqp;

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (!dqp) {
		mutex_unlock(&qi->qi_tree_lock);
		XFS_STATS_INC(mp, xs_qm_dqcachemisses);
		return NULL;
	}

	xfs_dqlock(dqp);
	if (dqp->q_flags & XFS_DQFLAG_FREEING) {
		xfs_dqunlock(dqp);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_freeing(dqp);
		delay(1);
		goto restart;
	}

	dqp->q_nrefs++;
	mutex_unlock(&qi->qi_tree_lock);

	trace_xfs_dqget_hit(dqp);
	XFS_STATS_INC(mp, xs_qm_dqcachehits);
	return dqp;
}

/*
 * Try to insert a new dquot into the in-core cache.  If an error occurs the
 * caller should throw away the dquot and start over.  Otherwise, the dquot
 * is returned locked (and held by the cache) as if there had been a cache
 * hit.
 */
static int
xfs_qm_dqget_cache_insert(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id,
	struct xfs_dquot	*dqp)
{
	int			error;

	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		/* Duplicate found!  Caller must try again. */
		WARN_ON(error != -EEXIST);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		return error;
	}

	/* Return a locked dquot to the caller, with a reference taken. */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

	return 0;
}

/* Check our input parameters. */
static int
xfs_qm_dqget_checks(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type)
{
	if (WARN_ON_ONCE(!XFS_IS_QUOTA_RUNNING(mp)))
		return -ESRCH;

	switch (type) {
	case XFS_DQTYPE_USER:
		if (!XFS_IS_UQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	case XFS_DQTYPE_GROUP:
		if (!XFS_IS_GQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	case XFS_DQTYPE_PROJ:
		if (!XFS_IS_PQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	default:
		WARN_ON_ONCE(0);
		return -EINVAL;
	}
}

/*
 * Given the file system, id, and type (UDQUOT/GDQUOT), return a locked
 * dquot, doing an allocation (if requested) as needed.
 */
int
xfs_qm_dqget(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

restart:
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
	if (error)
		return error;

	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
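
/*
 * Usage sketch (an illustrative caller, not code from this file): look
 * up the user dquot for an id, allocating the on-disk chunk if it is a
 * hole, then drop the reference when done:
 *
 *	struct xfs_dquot	*dqp;
 *	int			error;
 *
 *	error = xfs_qm_dqget(mp, id, XFS_DQTYPE_USER, true, &dqp);
 *	if (!error) {
 *		...use dqp, which is returned locked with a reference...
 *		xfs_qm_dqput(dqp);
 *	}
 */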

/*
 * Given a dquot id and type, read and initialize a dquot from the on-disk
 * metadata.  This function is only for use during quota initialization so
 * it ignores the dquot cache assuming that the dquot shrinker isn't set up.
 * The caller is responsible for _qm_dqdestroy'ing the returned dquot.
 */
int
xfs_qm_dqget_uncached(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_dquot	**dqpp)
{
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

	return xfs_qm_dqread(mp, id, type, 0, dqpp);
}

/* Return the quota id for a given inode and type. */
xfs_dqid_t
xfs_qm_id_for_quotatype(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type)
{
	switch (type) {
	case XFS_DQTYPE_USER:
		return i_uid_read(VFS_I(ip));
	case XFS_DQTYPE_GROUP:
		return i_gid_read(VFS_I(ip));
	case XFS_DQTYPE_PROJ:
		return ip->i_d.di_projid;
	}
	ASSERT(0);
	return 0;
}

/*
 * Return the dquot for a given inode and type.  If @can_alloc is true, then
 * allocate blocks if needed.  The inode's ILOCK must be held and it must not
 * already have a dquot of that type attached.
 */
int
xfs_qm_dqget_inode(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_inode_dquot(ip, type) == NULL);

	id = xfs_qm_id_for_quotatype(ip, type);

restart:
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * A dquot could be attached to this inode by now, since we had
	 * dropped the ilock.
	 */
	if (xfs_this_quota_on(mp, type)) {
		struct xfs_dquot	*dqp1;

		dqp1 = xfs_inode_dquot(ip, type);
		if (dqp1) {
			xfs_qm_dqdestroy(dqp);
			dqp = dqp1;
			xfs_dqlock(dqp);
			goto dqret;
		}
	} else {
		/* inode stays locked on return */
		xfs_qm_dqdestroy(dqp);
		return -ESRCH;
	}

	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

dqret:
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
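
/*
 * Usage sketch (illustrative, not code from this file): the caller holds
 * the ILOCK and has no dquot of this type attached to the inode:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	error = xfs_qm_dqget_inode(ip, XFS_DQTYPE_GROUP, true, &dqp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *
 * On success dqp comes back locked with a reference held; note the ILOCK
 * may have been dropped and re-acquired internally across the disk read.
 */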

/*
 * Starting at @id and progressing upwards, look for an initialized incore
 * dquot, lock it, and return it.
 */
int
xfs_qm_dqget_next(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_dquot	**dqpp)
{
	struct xfs_dquot	*dqp;
	int			error = 0;

	*dqpp = NULL;
	for (; !error; error = xfs_dq_get_next_id(mp, type, &id)) {
		error = xfs_qm_dqget(mp, id, type, false, &dqp);
		if (error == -ENOENT)
			continue;
		else if (error != 0)
			break;

		if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
			*dqpp = dqp;
			return 0;
		}

		xfs_qm_dqput(dqp);
	}

	return error;
}
1037
1038/*
Christoph Hellwigf8739c32012-03-13 08:52:34 +00001039 * Release a reference to the dquot (decrement ref-count) and unlock it.
1040 *
1041 * If there is a group quota attached to this dquot, carefully release that
1042 * too without tripping over deadlocks'n'stuff.
1043 */
1044void
1045xfs_qm_dqput(
1046 struct xfs_dquot *dqp)
1047{
1048 ASSERT(dqp->q_nrefs > 0);
1049 ASSERT(XFS_DQ_IS_LOCKED(dqp));
1050
1051 trace_xfs_dqput(dqp);
1052
Dave Chinner3c35337572014-05-05 17:30:15 +10001053 if (--dqp->q_nrefs == 0) {
1054 struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
1055 trace_xfs_dqput_free(dqp);
1056
1057 if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
Bill O'Donnellff6d6af2015-10-12 18:21:22 +11001058 XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
Dave Chinner3c35337572014-05-05 17:30:15 +10001059 }
1060 xfs_dqunlock(dqp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001061}
1062
1063/*
1064 * Release a dquot. Flush it if dirty, then dqput() it.
1065 * dquot must not be locked.
1066 */
1067void
1068xfs_qm_dqrele(
Pavel Reichlaefe69a2019-11-12 17:04:02 -08001069 struct xfs_dquot *dqp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001070{
Christoph Hellwig7d095252009-06-08 15:33:32 +02001071 if (!dqp)
1072 return;
1073
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001074 trace_xfs_dqrele(dqp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075
1076 xfs_dqlock(dqp);
1077 /*
1078 * We don't care to flush it if the dquot is dirty here.
1079 * That will create stutters that we want to avoid.
1080 * Instead we do a delayed write when we try to reclaim
1081 * a dirty dquot. Also xfs_sync will take part of the burden...
1082 */
1083 xfs_qm_dqput(dqp);
1084}
1085
Christoph Hellwigca30b2a2010-06-23 18:11:15 +10001086/*
1087 * This is the dquot flushing I/O completion routine. It is called
1088 * from interrupt level when the buffer containing the dquot is
1089 * flushed to disk. It is responsible for removing the dquot logitem
1090 * from the AIL if it has not been re-logged, and unlocking the dquot's
1091 * flush lock. This behavior is very similar to that of inodes..
1092 */
Dave Chinner6f5de182020-06-29 14:48:59 -07001093static void
Christoph Hellwigca30b2a2010-06-23 18:11:15 +10001094xfs_qm_dqflush_done(
Christoph Hellwigca30b2a2010-06-23 18:11:15 +10001095 struct xfs_log_item *lip)
1096{
Pavel Reichlfd8b81d2019-11-12 17:04:26 -08001097 struct xfs_dq_logitem *qip = (struct xfs_dq_logitem *)lip;
Pavel Reichlaefe69a2019-11-12 17:04:02 -08001098 struct xfs_dquot *dqp = qip->qli_dquot;
Christoph Hellwigca30b2a2010-06-23 18:11:15 +10001099 struct xfs_ail *ailp = lip->li_ailp;
Brian Foster849274c2020-05-06 13:25:23 -07001100 xfs_lsn_t tail_lsn;
Christoph Hellwigca30b2a2010-06-23 18:11:15 +10001101
1102 /*
1103 * We only want to pull the item from the AIL if its
1104 * location in the log has not changed since we started the flush.
1105 * Thus, we only bother if the dquot's lsn has
1106 * not changed. First we check the lsn outside the lock
1107 * since it's cheaper, and then we recheck while
1108 * holding the lock before removing the dquot from the AIL.
1109 */
Dave Chinner22525c12018-05-09 07:47:34 -07001110 if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
Carlos Maiolino373b0582017-11-28 08:54:10 -08001111 ((lip->li_lsn == qip->qli_flush_lsn) ||
Dave Chinner22525c12018-05-09 07:47:34 -07001112 test_bit(XFS_LI_FAILED, &lip->li_flags))) {
Christoph Hellwigca30b2a2010-06-23 18:11:15 +10001113
Matthew Wilcox57e80952018-03-07 14:59:39 -08001114 spin_lock(&ailp->ail_lock);
Dave Chinnere98084b2020-06-29 14:49:15 -07001115 xfs_clear_li_failed(lip);
Carlos Maiolino373b0582017-11-28 08:54:10 -08001116 if (lip->li_lsn == qip->qli_flush_lsn) {
Brian Foster849274c2020-05-06 13:25:23 -07001117 /* xfs_ail_update_finish() drops the AIL lock */
1118 tail_lsn = xfs_ail_delete_one(ailp, lip);
1119 xfs_ail_update_finish(ailp, tail_lsn);
Carlos Maiolino373b0582017-11-28 08:54:10 -08001120 } else {
Matthew Wilcox57e80952018-03-07 14:59:39 -08001121 spin_unlock(&ailp->ail_lock);
Carlos Maiolino373b0582017-11-28 08:54:10 -08001122 }
Christoph Hellwigca30b2a2010-06-23 18:11:15 +10001123 }
1124
1125 /*
1126 * Release the dq's flush lock since we're done with it.
1127 */
1128 xfs_dqfunlock(dqp);
1129}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130
Dave Chinner6f5de182020-06-29 14:48:59 -07001131void
Christoph Hellwig664ffb82020-09-01 10:55:29 -07001132xfs_buf_dquot_iodone(
Dave Chinner6f5de182020-06-29 14:48:59 -07001133 struct xfs_buf *bp)
1134{
1135 struct xfs_log_item *lip, *n;
1136
1137 list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
1138 list_del_init(&lip->li_bio_list);
1139 xfs_qm_dqflush_done(lip);
1140 }
1141}
1142
Christoph Hellwig664ffb82020-09-01 10:55:29 -07001143void
1144xfs_buf_dquot_io_fail(
1145 struct xfs_buf *bp)
1146{
1147 struct xfs_log_item *lip;
1148
1149 spin_lock(&bp->b_mount->m_ail->ail_lock);
1150 list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
1151 xfs_set_li_failed(lip, bp);
1152 spin_unlock(&bp->b_mount->m_ail->ail_lock);
1153}
1154
Darrick J. Wong0b0fa1d2020-07-14 10:37:22 -07001155/* Check incore dquot for errors before we flush. */
1156static xfs_failaddr_t
1157xfs_qm_dqflush_check(
1158 struct xfs_dquot *dqp)
1159{
Darrick J. Wong1a7ed272020-07-15 17:53:43 -07001160 xfs_dqtype_t type = xfs_dquot_type(dqp);
Darrick J. Wong0b0fa1d2020-07-14 10:37:22 -07001161
Darrick J. Wong8cd49012020-07-15 17:42:36 -07001162 if (type != XFS_DQTYPE_USER &&
1163 type != XFS_DQTYPE_GROUP &&
1164 type != XFS_DQTYPE_PROJ)
Darrick J. Wong0b0fa1d2020-07-14 10:37:22 -07001165 return __this_address;
1166
Darrick J. Wongd3537cf2020-07-14 10:37:31 -07001167 if (dqp->q_id == 0)
1168 return NULL;
1169
Darrick J. Wongbe37d402020-07-14 10:37:31 -07001170 if (dqp->q_blk.softlimit && dqp->q_blk.count > dqp->q_blk.softlimit &&
Darrick J. Wong19dce7e2020-07-14 10:37:32 -07001171 !dqp->q_blk.timer)
Darrick J. Wongd3537cf2020-07-14 10:37:31 -07001172 return __this_address;
1173
Darrick J. Wongbe37d402020-07-14 10:37:31 -07001174 if (dqp->q_ino.softlimit && dqp->q_ino.count > dqp->q_ino.softlimit &&
Darrick J. Wong19dce7e2020-07-14 10:37:32 -07001175 !dqp->q_ino.timer)
Darrick J. Wongd3537cf2020-07-14 10:37:31 -07001176 return __this_address;
1177
Darrick J. Wongbe37d402020-07-14 10:37:31 -07001178 if (dqp->q_rtb.softlimit && dqp->q_rtb.count > dqp->q_rtb.softlimit &&
Darrick J. Wong19dce7e2020-07-14 10:37:32 -07001179 !dqp->q_rtb.timer)
Darrick J. Wongd3537cf2020-07-14 10:37:31 -07001180 return __this_address;
1181
Darrick J. Wong0b0fa1d2020-07-14 10:37:22 -07001182 return NULL;
1183}
1184
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185/*
1186 * Write a modified dquot to disk.
1187 * The dquot must be locked and the flush lock too taken by caller.
1188 * The flush lock will not be unlocked until the dquot reaches the disk,
1189 * but the dquot is free to be unlocked and modified by the caller
1190 * in the interim. Dquot is still locked on return. This behavior is
1191 * identical to that of inodes.
1192 */
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
	struct xfs_buf		*bp;
	struct xfs_dqblk	*dqblk;
	xfs_failaddr_t		fa;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	xfs_qm_dqunpin_wait(dqp);

	/*
	 * Get the buffer containing the on-disk dquot.
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
				   &bp, &xfs_dquot_buf_ops);
	if (error == -EAGAIN)
		goto out_unlock;
	if (error)
		goto out_abort;

	fa = xfs_qm_dqflush_check(dqp);
	if (fa) {
		xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
				dqp->q_id, fa);
		xfs_buf_relse(bp);
		error = -EFSCORRUPTED;
		goto out_abort;
	}

	/* Flush the incore dquot to the ondisk buffer. */
	dqblk = bp->b_addr + dqp->q_bufoffset;
	xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp);

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->q_flags &= ~XFS_DQFLAG_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * Copy the lsn into the on-disk dquot now while we have the in-memory
	 * dquot here. This can't be done later in the write verifier as we
	 * can't get access to the log item at that point in time.
	 *
	 * We also calculate the CRC here so that the on-disk dquot in the
	 * buffer always has a valid CRC. This ensures there is no possibility
	 * of a dquot without an up-to-date CRC getting to disk.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		dqblk->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach the dquot to the buffer so that we can remove this dquot
	 * from the AIL and release the flush lock once the dquot is synced
	 * to disk.
	 */
	bp->b_flags |= _XBF_DQUOTS;
	list_add_tail(&dqp->q_logitem.qli_item.li_bio_list, &bp->b_li_list);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_abort:
	dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
	xfs_trans_ail_delete(lip, 0);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
out_unlock:
	xfs_dqfunlock(dqp);
	return error;
}
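
/*
 * Hedged usage sketch, not part of this file: flush one locked dquot to
 * its backing buffer and queue that buffer for delayed write, in the
 * style of quotacheck. The function name and buffer_list argument are
 * illustrative; the locking helpers come from xfs_dquot.h.
 */
static int
example_flush_one_dquot(
	struct xfs_dquot	*dqp,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;

	xfs_dqlock(dqp);
	xfs_dqflock(dqp);	/* take the flush lock before flushing */

	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;	/* dqflush drops the flush lock on error */

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
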
/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lowest id first.
 */
void
xfs_dqlock2(
	struct xfs_dquot	*d1,
	struct xfs_dquot	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (d1->q_id > d2->q_id) {
			mutex_lock(&d2->q_qlock);
			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
		} else {
			mutex_lock(&d1->q_qlock);
			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
		}
	} else if (d1) {
		mutex_lock(&d1->q_qlock);
	} else if (d2) {
		mutex_lock(&d2->q_qlock);
	}
}
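
/*
 * Hedged usage sketch, not part of this file: take both dquot locks in a
 * deadlock-safe order before moving a block reservation between a user
 * and a group dquot. The helper name and the accounting are illustrative.
 */
static void
example_move_block_res(
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	xfs_qcnt_t		nblks)
{
	/* xfs_dqlock2() orders the two q_qlock mutexes by id for us. */
	xfs_dqlock2(udqp, gdqp);
	udqp->q_blk.reserved -= nblks;
	gdqp->q_blk.reserved += nblks;
	xfs_dqunlock(udqp);
	xfs_dqunlock(gdqp);
}
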
int __init
xfs_qm_init(void)
{
	xfs_qm_dqzone = kmem_cache_create("xfs_dquot",
					  sizeof(struct xfs_dquot),
					  0, 0, NULL);
	if (!xfs_qm_dqzone)
		goto out;

	xfs_qm_dqtrxzone = kmem_cache_create("xfs_dqtrx",
					     sizeof(struct xfs_dquot_acct),
					     0, 0, NULL);
	if (!xfs_qm_dqtrxzone)
		goto out_free_dqzone;

	return 0;

out_free_dqzone:
	kmem_cache_destroy(xfs_qm_dqzone);
out:
	return -ENOMEM;
}

void
xfs_qm_exit(void)
{
	kmem_cache_destroy(xfs_qm_dqtrxzone);
	kmem_cache_destroy(xfs_qm_dqzone);
}

/*
 * Iterate every dquot of a particular type. The caller must ensure that the
 * particular quota type is active. iter_fn can return negative error codes,
 * or -ECANCELED to indicate that it wants to stop iterating.
 */
int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	xfs_qm_dqiterate_fn	iter_fn,
	void			*priv)
{
	struct xfs_dquot	*dq;
	xfs_dqid_t		id = 0;
	int			error;

	do {
		error = xfs_qm_dqget_next(mp, id, type, &dq);
		if (error == -ENOENT)
			return 0;
		if (error)
			return error;

		error = iter_fn(dq, type, priv);
		id = dq->q_id + 1;
		xfs_qm_dqput(dq);
	} while (error == 0 && id != 0);

	return error;
}
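
/*
 * Hedged usage sketch, not part of this file: count every dquot of a type
 * via xfs_qm_dqiterate(). The callback matches the xfs_qm_dqiterate_fn
 * signature used above; the names below are illustrative. A caller would
 * do: error = xfs_qm_dqiterate(mp, XFS_DQTYPE_USER, example_count_dquot,
 * &count);
 */
static int
example_count_dquot(
	struct xfs_dquot	*dq,
	xfs_dqtype_t		type,
	void			*priv)
{
	unsigned int		*count = priv;

	(*count)++;
	return 0;	/* return -ECANCELED to stop iterating early */
}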