Dave Chinner | 0b61f8a | 2018-06-05 19:42:14 -0700 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* |
Nathan Scott | 4ce3121 | 2005-11-02 14:59:41 +1100 | [diff] [blame] | 3 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. |
| 4 | * All Rights Reserved. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5 | */ |
| 6 | #ifndef __XFS_DQUOT_H__ |
| 7 | #define __XFS_DQUOT_H__ |
| 8 | |
| 9 | /* |
| 10 | * Dquots are structures that hold quota information about a user or a group, |
| 11 | * much like inodes are for files. In fact, dquots share many characteristics |
| 12 | * with inodes. However, dquots can also be a centralized resource, relative |
| 13 | * to a collection of inodes. In this respect, dquots share some characteristics |
| 14 | * of the superblock. |
 * XFS dquots exploit both of these characteristics in their algorithms.
 * They make every attempt
| 16 | * to not be a bottleneck when quotas are on and have minimal impact, if any, |
| 17 | * when quotas are off. |
| 18 | */ |
| 19 | |
/*
 * Forward declarations for struct types that this header references only
 * through pointers.  xfs_inode and xfs_buf were previously undeclared even
 * though xfs_inode_dquot(), xfs_qm_id_for_quotatype() and xfs_qm_dqflush()
 * take pointers to them; declaring them here keeps the prototypes valid
 * regardless of include order.
 */
struct xfs_mount;
struct xfs_trans;
struct xfs_inode;
struct xfs_buf;
| 22 | |
/*
 * Indices into xfs_dquot.q_low_space[]: free-space thresholds consulted as a
 * dquot approaches its block hard limit (see xfs_dquot_lowsp()).  The names
 * suggest 1%, 3% and 5% steps of the hard limit; confirm against
 * xfs_dquot_set_prealloc_limits(), which computes the values.
 */
enum {
	XFS_QLOWSP_1_PCNT = 0,	/* lowest threshold; used by xfs_dquot_lowsp() */
	XFS_QLOWSP_3_PCNT,
	XFS_QLOWSP_5_PCNT,
	XFS_QLOWSP_MAX		/* array size of q_low_space[], not a threshold */
};
| 29 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 30 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 31 | * The incore dquot structure |
| 32 | */ |
struct xfs_dquot {
	uint			dq_flags;	/* XFS_DQ_* type and state flags */
	struct list_head	q_lru;		/* entry on the dquot LRU list */
	struct xfs_mount	*q_mount;	/* mount this dquot belongs to */
	uint			q_nrefs;	/* reference count, bumped under
						 * q_qlock (see xfs_qm_dqhold()) */
	xfs_daddr_t		q_blkno;	/* disk address of the dquot's block */
	int			q_bufoffset;	/* offset of this dquot within the
						 * buffer; units not evident here —
						 * presumably bytes, confirm in
						 * xfs_dquot.c */
	xfs_fileoff_t		q_fileoffset;	/* file offset, presumably within
						 * the quota inode — confirm */

	struct xfs_disk_dquot	q_core;		/* incore copy of the on-disk dquot */
	struct xfs_dq_logitem	q_logitem;	/* log item for transaction logging */
	/* total regular nblks used+reserved */
	xfs_qcnt_t		q_res_bcount;
	/* total inos allocd+reserved */
	xfs_qcnt_t		q_res_icount;
	/* total realtime blks used+reserved */
	xfs_qcnt_t		q_res_rtbcount;
	xfs_qcnt_t		q_prealloc_lo_wmark;	/* low prealloc watermark */
	xfs_qcnt_t		q_prealloc_hi_wmark;	/* high prealloc watermark */
	int64_t			q_low_space[XFS_QLOWSP_MAX]; /* low free-space
						 * thresholds, indexed by
						 * XFS_QLOWSP_* */
	struct mutex		q_qlock;	/* protects the dquot; taken via
						 * xfs_dqlock()/xfs_dqunlock() */
	struct completion	q_flush;	/* flush "lock" serializing writeback
						 * of this dquot (see xfs_dqflock()) */
	atomic_t		q_pincount;	/* log pin count; waited on by
						 * xfs_qm_dqunpin_wait() */
	struct wait_queue_head	q_pinwait;	/* waitqueue for unpinning */
};
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 58 | |
/*
 * Lock hierarchy (lockdep subclasses) for q_qlock:
 * XFS_QLOCK_NORMAL is the implicit default,
 * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2
 */
enum {
	XFS_QLOCK_NORMAL = 0,	/* single-dquot locking */
	XFS_QLOCK_NESTED,	/* second dquot taken in xfs_dqlock2() */
};
| 68 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 69 | /* |
Pavel Reichl | aefe69a | 2019-11-12 17:04:02 -0800 | [diff] [blame] | 70 | * Manage the q_flush completion queue embedded in the dquot. This completion |
David Chinner | e1f49cf | 2008-08-13 16:41:43 +1000 | [diff] [blame] | 71 | * queue synchronizes processes attempting to flush the in-core dquot back to |
| 72 | * disk. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 73 | */ |
/*
 * Acquire the flush "lock", sleeping until the q_flush completion is
 * available.  Released by xfs_dqfunlock().
 */
static inline void xfs_dqflock(struct xfs_dquot *dqp)
{
	wait_for_completion(&dqp->q_flush);
}
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 78 | |
/*
 * Try to acquire the flush lock without sleeping.  Returns true if the
 * lock was obtained, false if someone else holds it.
 */
static inline bool xfs_dqflock_nowait(struct xfs_dquot *dqp)
{
	return try_wait_for_completion(&dqp->q_flush);
}
| 83 | |
/*
 * Release the flush lock taken by xfs_dqflock() or xfs_dqflock_nowait(),
 * waking one waiter on the completion.
 */
static inline void xfs_dqfunlock(struct xfs_dquot *dqp)
{
	complete(&dqp->q_flush);
}
| 88 | |
/* Try to take q_qlock without sleeping; nonzero on success. */
static inline int xfs_dqlock_nowait(struct xfs_dquot *dqp)
{
	return mutex_trylock(&dqp->q_qlock);
}
| 93 | |
/* Lock the dquot against concurrent modification. */
static inline void xfs_dqlock(struct xfs_dquot *dqp)
{
	mutex_lock(&dqp->q_qlock);
}
| 98 | |
/* Drop q_qlock taken by xfs_dqlock()/xfs_dqlock_nowait(). */
static inline void xfs_dqunlock(struct xfs_dquot *dqp)
{
	mutex_unlock(&dqp->q_qlock);
}
| 103 | |
Chandra Seetharaman | 6967b96 | 2012-01-23 17:31:25 +0000 | [diff] [blame] | 104 | static inline int xfs_this_quota_on(struct xfs_mount *mp, int type) |
| 105 | { |
| 106 | switch (type & XFS_DQ_ALLTYPES) { |
| 107 | case XFS_DQ_USER: |
| 108 | return XFS_IS_UQUOTA_ON(mp); |
| 109 | case XFS_DQ_GROUP: |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 110 | return XFS_IS_GQUOTA_ON(mp); |
Chandra Seetharaman | 6967b96 | 2012-01-23 17:31:25 +0000 | [diff] [blame] | 111 | case XFS_DQ_PROJ: |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 112 | return XFS_IS_PQUOTA_ON(mp); |
Chandra Seetharaman | 6967b96 | 2012-01-23 17:31:25 +0000 | [diff] [blame] | 113 | default: |
| 114 | return 0; |
| 115 | } |
| 116 | } |
| 117 | |
Pavel Reichl | aefe69a | 2019-11-12 17:04:02 -0800 | [diff] [blame] | 118 | static inline struct xfs_dquot *xfs_inode_dquot(struct xfs_inode *ip, int type) |
Chandra Seetharaman | 3673141 | 2012-01-23 17:31:30 +0000 | [diff] [blame] | 119 | { |
| 120 | switch (type & XFS_DQ_ALLTYPES) { |
| 121 | case XFS_DQ_USER: |
| 122 | return ip->i_udquot; |
| 123 | case XFS_DQ_GROUP: |
Chandra Seetharaman | 3673141 | 2012-01-23 17:31:30 +0000 | [diff] [blame] | 124 | return ip->i_gdquot; |
Chandra Seetharaman | 92f8ff7 | 2013-07-11 00:00:40 -0500 | [diff] [blame] | 125 | case XFS_DQ_PROJ: |
| 126 | return ip->i_pdquot; |
Chandra Seetharaman | 3673141 | 2012-01-23 17:31:30 +0000 | [diff] [blame] | 127 | default: |
| 128 | return NULL; |
| 129 | } |
| 130 | } |
| 131 | |
Brian Foster | dc06f398 | 2014-07-24 19:49:28 +1000 | [diff] [blame] | 132 | /* |
| 133 | * Check whether a dquot is under low free space conditions. We assume the quota |
| 134 | * is enabled and enforced. |
| 135 | */ |
| 136 | static inline bool xfs_dquot_lowsp(struct xfs_dquot *dqp) |
| 137 | { |
| 138 | int64_t freesp; |
| 139 | |
| 140 | freesp = be64_to_cpu(dqp->q_core.d_blk_hardlimit) - dqp->q_res_bcount; |
| 141 | if (freesp < dqp->q_low_space[XFS_QLOWSP_1_PCNT]) |
| 142 | return true; |
| 143 | |
| 144 | return false; |
| 145 | } |
| 146 | |
/* Is the dquot's q_qlock currently held?  (For assertions.) */
#define XFS_DQ_IS_LOCKED(dqp)	(mutex_is_locked(&((dqp)->q_qlock)))
/* Does the incore dquot carry the dirty flag? */
#define XFS_DQ_IS_DIRTY(dqp)	((dqp)->dq_flags & XFS_DQ_DIRTY)
/* Dquot type predicates based on the XFS_DQ_* type flags. */
#define XFS_QM_ISUDQ(dqp)	((dqp)->dq_flags & XFS_DQ_USER)
#define XFS_QM_ISPDQ(dqp)	((dqp)->dq_flags & XFS_DQ_PROJ)
#define XFS_QM_ISGDQ(dqp)	((dqp)->dq_flags & XFS_DQ_GROUP)
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 152 | |
/*
 * Out-of-line dquot interfaces.  The xfs_qm_dqget*() family presumably
 * returns a referenced dquot in *dqpp that is dropped with xfs_qm_dqput()
 * — confirm the reference contract in the implementation.
 */
void		xfs_qm_dqdestroy(struct xfs_dquot *dqp);
void		xfs_qm_dqunpin_wait(struct xfs_dquot *dqp);
/* Flush the incore dquot to its backing buffer; buffer handed back in *bpp. */
int		xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf **bpp);
void		xfs_qm_adjust_dqtimers(struct xfs_mount *mp,
					struct xfs_disk_dquot *d);
void		xfs_qm_adjust_dqlimits(struct xfs_mount *mp,
					struct xfs_dquot *d);
/* Map an inode to the dquot id for the given quota type. */
xfs_dqid_t	xfs_qm_id_for_quotatype(struct xfs_inode *ip, uint type);
int		xfs_qm_dqget(struct xfs_mount *mp, xfs_dqid_t id,
					uint type, bool can_alloc,
					struct xfs_dquot **dqpp);
int		xfs_qm_dqget_inode(struct xfs_inode *ip, uint type,
					bool can_alloc,
					struct xfs_dquot **dqpp);
int		xfs_qm_dqget_next(struct xfs_mount *mp, xfs_dqid_t id,
					uint type, struct xfs_dquot **dqpp);
int		xfs_qm_dqget_uncached(struct xfs_mount *mp,
					xfs_dqid_t id, uint type,
					struct xfs_dquot **dqpp);
void		xfs_qm_dqput(struct xfs_dquot *dqp);

/* Lock two dquots in a consistent order (see XFS_QLOCK_NESTED above). */
void		xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);

/* Presumably fills in q_prealloc_*_wmark and q_low_space[] — confirm. */
void		xfs_dquot_set_prealloc_limits(struct xfs_dquot *);
Brian Foster | b136645 | 2013-03-18 10:51:46 -0400 | [diff] [blame] | 177 | |
/*
 * Take an additional reference on a dquot the caller already has a valid
 * reference to, and return it.  The increment is done under q_qlock so it
 * is safe against concurrent holders.
 */
static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
{
	xfs_dqlock(dqp);
	dqp->q_nrefs++;
	xfs_dqunlock(dqp);
	return dqp;
}
| 185 | |
/*
 * Callback invoked for each dquot visited by xfs_qm_dqiterate(); a nonzero
 * return presumably aborts the walk — confirm in the implementation.
 */
typedef int (*xfs_qm_dqiterate_fn)(struct xfs_dquot *dq, uint dqtype,
				void *priv);
/* Iterate dquots of @dqtype on @mp, calling @iter_fn with @priv on each. */
int		xfs_qm_dqiterate(struct xfs_mount *mp, uint dqtype,
				xfs_qm_dqiterate_fn iter_fn, void *priv);
| 190 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 191 | #endif /* __XFS_DQUOT_H__ */ |