// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_QUOTA_H__
#define __XFS_QUOTA_H__

#include "xfs_quota_defs.h"

/*
 * Kernel only quota definitions and functions
 */

struct xfs_trans;

/*
 * This check is typically done without holding the inode lock;
 * that may seem racy, but it is harmless in the context in which it is used.
 * The inode cannot go inactive as long as a reference is kept, and
 * therefore if dquot(s) were attached, they'll stay consistent.
 * If, for example, the ownership of the inode changes while
 * we didn't have the inode locked, the appropriate dquot(s) will be
 * attached atomically.
 */
#define XFS_NOT_DQATTACHED(mp, ip) \
	((XFS_IS_UQUOTA_ON(mp) && (ip)->i_udquot == NULL) || \
	 (XFS_IS_GQUOTA_ON(mp) && (ip)->i_gdquot == NULL) || \
	 (XFS_IS_PQUOTA_ON(mp) && (ip)->i_pdquot == NULL))
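
/*
 * Illustrative sketch (not part of this header's API): a caller that needs
 * dquots attached before modifying quota counts would typically test the
 * macro above and attach on demand, along the lines of:
 *
 *	if (XFS_NOT_DQATTACHED(mp, ip)) {
 *		error = xfs_qm_dqattach(ip);
 *		if (error)
 *			return error;
 *	}
 *
 * xfs_qm_dqattach() is declared below; real callers may perform this check
 * and the attach under different locking, so treat this only as a sketch.
 */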

#define XFS_QM_NEED_QUOTACHECK(mp) \
	((XFS_IS_UQUOTA_ON(mp) && \
		(mp->m_sb.sb_qflags & XFS_UQUOTA_CHKD) == 0) || \
	 (XFS_IS_GQUOTA_ON(mp) && \
		(mp->m_sb.sb_qflags & XFS_GQUOTA_CHKD) == 0) || \
	 (XFS_IS_PQUOTA_ON(mp) && \
		(mp->m_sb.sb_qflags & XFS_PQUOTA_CHKD) == 0))

static inline uint
xfs_quota_chkd_flag(
	uint		dqtype)
{
	switch (dqtype) {
	case XFS_DQ_USER:
		return XFS_UQUOTA_CHKD;
	case XFS_DQ_GROUP:
		return XFS_GQUOTA_CHKD;
	case XFS_DQ_PROJ:
		return XFS_PQUOTA_CHKD;
	default:
		return 0;
	}
}
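
/*
 * Illustrative use (a hedged sketch, not a definition from this header):
 * the CHKD flag returned above is typically tested against the quota flags
 * to decide whether the counts for that quota type have been validated by
 * quotacheck, e.g.:
 *
 *	if (!(mp->m_sb.sb_qflags & xfs_quota_chkd_flag(dqtype)))
 *		return false;
 *
 * Callers may equally test the in-core copy of the quota flags; which field
 * is checked depends on the context.
 */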

/*
 * The structure kept inside the xfs_trans_t keeps track of dquot changes
 * within a transaction and applies them later.
 */
struct xfs_dqtrx {
	struct xfs_dquot *qt_dquot;	  /* the dquot this refers to */

	uint64_t	qt_blk_res;	  /* blks reserved on a dquot */
	int64_t		qt_bcount_delta;  /* dquot blk count changes */
	int64_t		qt_delbcnt_delta; /* delayed dquot blk count changes */

	uint64_t	qt_rtblk_res;	  /* # rt blks reserved on a dquot */
	uint64_t	qt_rtblk_res_used;/* # rt blks used from reservation */
	int64_t		qt_rtbcount_delta;/* dquot realtime blk changes */
	int64_t		qt_delrtb_delta;  /* delayed RT blk count changes */

	uint64_t	qt_ino_res;	  /* inodes reserved on a dquot */
	uint64_t	qt_ino_res_used;  /* inodes used from the reservation */
	int64_t		qt_icount_delta;  /* dquot inode count changes */
};
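
/*
 * Illustrative flow (a hedged sketch; the real logic lives in
 * fs/xfs/xfs_trans_dquot.c): while a transaction is open, deltas are
 * accumulated into the qt_* fields above instead of being applied to the
 * dquots directly, e.g.:
 *
 *	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, nblks);
 *
 * At commit time xfs_trans_apply_dquot_deltas() folds the accumulated
 * deltas into the dquots; on cancel or failure
 * xfs_trans_unreserve_and_mod_dquots() releases the block and inode
 * reservations recorded in qt_blk_res, qt_rtblk_res and qt_ino_res.
 */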

#ifdef CONFIG_XFS_QUOTA
extern void xfs_trans_dup_dqinfo(struct xfs_trans *, struct xfs_trans *);
extern void xfs_trans_free_dqinfo(struct xfs_trans *);
extern void xfs_trans_mod_dquot_byino(struct xfs_trans *, struct xfs_inode *,
		uint, int64_t);
extern void xfs_trans_apply_dquot_deltas(struct xfs_trans *);
extern void xfs_trans_unreserve_and_mod_dquots(struct xfs_trans *);
extern int xfs_trans_reserve_quota_nblks(struct xfs_trans *,
		struct xfs_inode *, int64_t, long, uint);
extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
		struct xfs_mount *, struct xfs_dquot *,
		struct xfs_dquot *, struct xfs_dquot *, int64_t, long, uint);

extern int xfs_qm_vop_dqalloc(struct xfs_inode *, kuid_t, kgid_t,
		prid_t, uint, struct xfs_dquot **, struct xfs_dquot **,
		struct xfs_dquot **);
extern void xfs_qm_vop_create_dqattach(struct xfs_trans *, struct xfs_inode *,
		struct xfs_dquot *, struct xfs_dquot *, struct xfs_dquot *);
extern int xfs_qm_vop_rename_dqattach(struct xfs_inode **);
extern struct xfs_dquot *xfs_qm_vop_chown(struct xfs_trans *,
		struct xfs_inode *, struct xfs_dquot **, struct xfs_dquot *);
extern int xfs_qm_vop_chown_reserve(struct xfs_trans *, struct xfs_inode *,
		struct xfs_dquot *, struct xfs_dquot *,
		struct xfs_dquot *, uint);
extern int xfs_qm_dqattach(struct xfs_inode *);
extern int xfs_qm_dqattach_locked(struct xfs_inode *ip, bool doalloc);
extern void xfs_qm_dqdetach(struct xfs_inode *);
extern void xfs_qm_dqrele(struct xfs_dquot *);
extern void xfs_qm_statvfs(struct xfs_inode *, struct kstatfs *);
extern int xfs_qm_newmount(struct xfs_mount *, uint *, uint *);
extern void xfs_qm_mount_quotas(struct xfs_mount *);
extern void xfs_qm_unmount(struct xfs_mount *);
extern void xfs_qm_unmount_quotas(struct xfs_mount *);
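
/*
 * Illustrative call sequence (a hedged sketch, not a contract defined by
 * this header): a typical allocation path attaches dquots and reserves
 * quota inside the transaction before modifying any counts:
 *
 *	error = xfs_qm_dqattach(ip);
 *	if (error)
 *		return error;
 *	error = xfs_trans_reserve_quota_nblks(tp, ip, nblks, 0,
 *			XFS_QMOPT_RES_REGBLKS);
 *	if (error)
 *		goto out_trans_cancel;
 *
 * The reservation is released or converted to real usage as the transaction
 * completes; out_trans_cancel is a hypothetical error label.
 */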

#else
static inline int
xfs_qm_vop_dqalloc(struct xfs_inode *ip, kuid_t kuid, kgid_t kgid,
		prid_t prid, uint flags, struct xfs_dquot **udqp,
		struct xfs_dquot **gdqp, struct xfs_dquot **pdqp)
{
	*udqp = NULL;
	*gdqp = NULL;
	*pdqp = NULL;
	return 0;
}
#define xfs_trans_dup_dqinfo(tp, tp2)
#define xfs_trans_free_dqinfo(tp)
#define xfs_trans_mod_dquot_byino(tp, ip, fields, delta)
#define xfs_trans_apply_dquot_deltas(tp)
#define xfs_trans_unreserve_and_mod_dquots(tp)
static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp,
		struct xfs_inode *ip, int64_t nblks, long ninos, uint flags)
{
	return 0;
}
static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
		struct xfs_mount *mp, struct xfs_dquot *udqp,
		struct xfs_dquot *gdqp, struct xfs_dquot *pdqp,
		int64_t nblks, long ninos, uint flags)
{
	return 0;
}
#define xfs_qm_vop_create_dqattach(tp, ip, u, g, p)
#define xfs_qm_vop_rename_dqattach(it)			(0)
#define xfs_qm_vop_chown(tp, ip, old, new)		(NULL)
#define xfs_qm_vop_chown_reserve(tp, ip, u, g, p, fl)	(0)
#define xfs_qm_dqattach(ip)				(0)
#define xfs_qm_dqattach_locked(ip, fl)			(0)
#define xfs_qm_dqdetach(ip)
#define xfs_qm_dqrele(d)
#define xfs_qm_statvfs(ip, s)
#define xfs_qm_newmount(mp, a, b)			(0)
#define xfs_qm_mount_quotas(mp)
#define xfs_qm_unmount(mp)
#define xfs_qm_unmount_quotas(mp)
#endif /* CONFIG_XFS_QUOTA */

#define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
	xfs_trans_reserve_quota_nblks(tp, ip, -(nblks), -(ninos), flags)
#define xfs_trans_reserve_quota(tp, mp, ud, gd, pd, nb, ni, f) \
	xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, pd, nb, ni, \
				f | XFS_QMOPT_RES_REGBLKS)

extern int xfs_mount_reset_sbqflags(struct xfs_mount *);

#endif /* __XFS_QUOTA_H__ */