// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can sit on
 * one node before being synced to the quota file. (The default is 60
 * seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so that it is not constantly being read.
 */
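
/*
 * A rough usage sketch (the exact knob locations here are an assumption and
 * vary by kernel version): "quota_quantum" is normally given as a mount
 * option, e.g.
 *
 *     mount -t gfs2 -o quota=on,quota_quantum=30 /dev/vg/lv /mnt/gfs2
 *
 * while "quota_scale" is typically adjusted at runtime through the tune
 * directory in sysfs, e.g. under /sys/fs/gfs2/<fsid>/tune/.
 */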

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define GFS2_QD_HASH_SHIFT      12
#define GFS2_QD_HASH_SIZE       BIT(GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)

/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                     -> sd_bitmap_lock                              */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];

static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
                                 const struct kqid qid)
{
        unsigned int h;

        h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
        h = jhash(&qid, sizeof(struct kqid), h);

        return h & GFS2_QD_HASH_MASK;
}

static inline void spin_lock_bucket(unsigned int hash)
{
        hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
        hlist_bl_unlock(&qd_hash_table[hash]);
}

static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
        struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
        kmem_cache_free(gfs2_quotad_cachep, qd);
}

static void gfs2_qd_dispose(struct list_head *list)
{
        struct gfs2_quota_data *qd;
        struct gfs2_sbd *sdp;

        while (!list_empty(list)) {
                qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
                sdp = qd->qd_gl->gl_name.ln_sbd;

                list_del(&qd->qd_lru);

                /* Free from the filesystem-specific list */
                spin_lock(&qd_lock);
                list_del(&qd->qd_list);
                spin_unlock(&qd_lock);

                spin_lock_bucket(qd->qd_hash);
                hlist_bl_del_rcu(&qd->qd_hlist);
                spin_unlock_bucket(qd->qd_hash);

                gfs2_assert_warn(sdp, !qd->qd_change);
                gfs2_assert_warn(sdp, !qd->qd_slot_count);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_glock_put(qd->qd_gl);
                atomic_dec(&sdp->sd_quota_count);

                /* Delete it from the common reclaim list */
                call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
        }
}


static enum lru_status gfs2_qd_isolate(struct list_head *item,
                struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
        struct list_head *dispose = arg;
        struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

        if (!spin_trylock(&qd->qd_lockref.lock))
                return LRU_SKIP;

        if (qd->qd_lockref.count == 0) {
                lockref_mark_dead(&qd->qd_lockref);
                list_lru_isolate_move(lru, &qd->qd_lru, dispose);
        }

        spin_unlock(&qd->qd_lockref.lock);
        return LRU_REMOVED;
}

static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
                                         struct shrink_control *sc)
{
        LIST_HEAD(dispose);
        unsigned long freed;

        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;

        freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
                                     gfs2_qd_isolate, &dispose);

        gfs2_qd_dispose(&dispose);

        return freed;
}

static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
                                          struct shrink_control *sc)
{
        return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}

struct shrinker gfs2_qd_shrinker = {
        .count_objects = gfs2_qd_shrink_count,
        .scan_objects = gfs2_qd_shrink_scan,
        .seeks = DEFAULT_SEEKS,
        .flags = SHRINKER_NUMA_AWARE,
};


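/*
 * Layout sketch implied by qd2index()/qd2offset() below: the quota file
 * interleaves user and group records by ID, one sizeof(struct gfs2_quota)
 * record each:
 *
 *     index 0: user 0,   index 1: group 0,
 *     index 2: user 1,   index 3: group 1, ...
 *
 * so, for example, the record for group ID 7 sits at index 15, i.e. at
 * offset 15 * sizeof(struct gfs2_quota) in the file.
 */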
static u64 qd2index(struct gfs2_quota_data *qd)
{
        struct kqid qid = qd->qd_id;
        return (2 * (u64)from_kqid(&init_user_ns, qid)) +
                ((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
        u64 offset;

        offset = qd2index(qd);
        offset *= sizeof(struct gfs2_quota);

        return offset;
}

static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
        struct gfs2_quota_data *qd;
        int error;

        qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
        if (!qd)
                return NULL;

        qd->qd_sbd = sdp;
        qd->qd_lockref.count = 1;
        spin_lock_init(&qd->qd_lockref.lock);
        qd->qd_id = qid;
        qd->qd_slot = -1;
        INIT_LIST_HEAD(&qd->qd_lru);
        qd->qd_hash = hash;

        error = gfs2_glock_get(sdp, qd2index(qd),
                               &gfs2_quota_glops, CREATE, &qd->qd_gl);
        if (error)
                goto fail;

        return qd;

fail:
        kmem_cache_free(gfs2_quotad_cachep, qd);
        return NULL;
}

static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
                                                     const struct gfs2_sbd *sdp,
                                                     struct kqid qid)
{
        struct gfs2_quota_data *qd;
        struct hlist_bl_node *h;

        hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
                if (!qid_eq(qd->qd_id, qid))
                        continue;
                if (qd->qd_sbd != sdp)
                        continue;
                if (lockref_get_not_dead(&qd->qd_lockref)) {
                        list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
                        return qd;
                }
        }

        return NULL;
}


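/*
 * Look up (or create) the gfs2_quota_data for a quota ID. The fast path is
 * a lockless RCU walk of the hash bucket; on a miss we allocate a new entry
 * and then recheck the bucket under qd_lock and the bucket lock, so a
 * racing inserter wins and the loser's allocation is freed.
 */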
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
                  struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd, *new_qd;
        unsigned int hash = gfs2_qd_hash(sdp, qid);

        rcu_read_lock();
        *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
        rcu_read_unlock();

        if (qd)
                return 0;

        new_qd = qd_alloc(hash, sdp, qid);
        if (!new_qd)
                return -ENOMEM;

        spin_lock(&qd_lock);
        spin_lock_bucket(hash);
        *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
        if (qd == NULL) {
                *qdp = new_qd;
                list_add(&new_qd->qd_list, &sdp->sd_quota_list);
                hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
                atomic_inc(&sdp->sd_quota_count);
        }
        spin_unlock_bucket(hash);
        spin_unlock(&qd_lock);

        if (qd) {
                gfs2_glock_put(new_qd->qd_gl);
                kmem_cache_free(gfs2_quotad_cachep, new_qd);
        }

        return 0;
}


static void qd_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
        gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
        lockref_get(&qd->qd_lockref);
}

static void qd_put(struct gfs2_quota_data *qd)
{
        if (lockref_put_or_lock(&qd->qd_lockref))
                return;

        qd->qd_lockref.count = 0;
        list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
        spin_unlock(&qd->qd_lockref.lock);
}

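/*
 * Each qd with a pending change owns a slot in the per-node quota change
 * file. Slots are allocated from the per-superblock bitmap sd_quota_bitmap
 * under sd_bitmap_lock; if the qd already holds a slot, slot_get() just
 * bumps the slot reference count.
 */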
static int slot_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_sbd;
        unsigned int bit;
        int error = 0;

        spin_lock(&sdp->sd_bitmap_lock);
        if (qd->qd_slot_count != 0)
                goto out;

        error = -ENOSPC;
        bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
        if (bit < sdp->sd_quota_slots) {
                set_bit(bit, sdp->sd_quota_bitmap);
                qd->qd_slot = bit;
                error = 0;
out:
                qd->qd_slot_count++;
        }
        spin_unlock(&sdp->sd_bitmap_lock);

        return error;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_sbd;

        spin_lock(&sdp->sd_bitmap_lock);
        gfs2_assert(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;
        spin_unlock(&sdp->sd_bitmap_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_sbd;

        spin_lock(&sdp->sd_bitmap_lock);
        gfs2_assert(sdp, qd->qd_slot_count);
        if (!--qd->qd_slot_count) {
                BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
                qd->qd_slot = -1;
        }
        spin_unlock(&sdp->sd_bitmap_lock);
}

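/*
 * Pin the buffer that holds this qd's record in the quota change file: the
 * record lives in block (qd_slot / sd_qc_per_block) of sd_qc_inode, at
 * (qd_slot % sd_qc_per_block) gfs2_quota_change records past the metadata
 * header.
 */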
static int bh_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        unsigned int block, offset;
        struct buffer_head *bh;
        int error;
        struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

        mutex_lock(&sdp->sd_quota_mutex);

        if (qd->qd_bh_count++) {
                mutex_unlock(&sdp->sd_quota_mutex);
                return 0;
        }

        block = qd->qd_slot / sdp->sd_qc_per_block;
        offset = qd->qd_slot % sdp->sd_qc_per_block;

        bh_map.b_size = BIT(ip->i_inode.i_blkbits);
        error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
        if (error)
                goto fail;
        error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh);
        if (error)
                goto fail;
        error = -EIO;
        if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
                goto fail_brelse;

        qd->qd_bh = bh;
        qd->qd_bh_qc = (struct gfs2_quota_change *)
                (bh->b_data + sizeof(struct gfs2_meta_header) +
                 offset * sizeof(struct gfs2_quota_change));

        mutex_unlock(&sdp->sd_quota_mutex);

        return 0;

fail_brelse:
        brelse(bh);
fail:
        qd->qd_bh_count--;
        mutex_unlock(&sdp->sd_quota_mutex);
        return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_assert(sdp, qd->qd_bh_count);
        if (!--qd->qd_bh_count) {
                brelse(qd->qd_bh);
                qd->qd_bh = NULL;
                qd->qd_bh_qc = NULL;
        }
        mutex_unlock(&sdp->sd_quota_mutex);
}

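/*
 * Decide whether a qd is due to be synced to the quota file. On success,
 * returns 1 with the references the sync will need taken (lockref and
 * slot), QDF_LOCKED set, and qd_change snapshotted into qd_change_sync.
 * Returns 0 if the qd is already being synced, has no pending change, or
 * was last synced at or after *sync_gen. Called with qd_lock held.
 */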
static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
                         u64 *sync_gen)
{
        if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
            !test_bit(QDF_CHANGE, &qd->qd_flags) ||
            (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
                return 0;

        if (!lockref_get_not_dead(&qd->qd_lockref))
                return 0;

        list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
        set_bit(QDF_LOCKED, &qd->qd_flags);
        qd->qd_change_sync = qd->qd_change;
        slot_hold(qd);
        return 1;
}

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL;
        int error;
        int found = 0;

        *qdp = NULL;

        if (sb_rdonly(sdp->sd_vfs))
                return 0;

        spin_lock(&qd_lock);

        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
                if (found)
                        break;
        }

        if (!found)
                qd = NULL;

        spin_unlock(&qd_lock);

        if (qd) {
                gfs2_assert_warn(sdp, qd->qd_change_sync);
                error = bh_get(qd);
                if (error) {
                        clear_bit(QDF_LOCKED, &qd->qd_flags);
                        slot_put(qd);
                        qd_put(qd);
                        return error;
                }
        }

        *qdp = qd;

        return 0;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
        gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
                         test_bit(QDF_LOCKED, &qd->qd_flags));
        clear_bit(QDF_LOCKED, &qd->qd_flags);
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
                    struct gfs2_quota_data **qdp)
{
        int error;

        error = qd_get(sdp, qid, qdp);
        if (error)
                return error;

        error = slot_get(*qdp);
        if (error)
                goto fail;

        error = bh_get(*qdp);
        if (error)
                goto fail_slot;

        return 0;

fail_slot:
        slot_put(*qdp);
fail:
        qd_put(*qdp);
        return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

/**
 * gfs2_qa_get - make sure we have a quota allocation data structure,
 *               if necessary
 * @ip: the inode for this reservation
 */
int gfs2_qa_get(struct gfs2_inode *ip)
{
        int error = 0;
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return 0;

        down_write(&ip->i_rw_mutex);
        if (ip->i_qadata == NULL) {
                ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
                if (!ip->i_qadata) {
                        error = -ENOMEM;
                        goto out;
                }
        }
        ip->i_qadata->qa_ref++;
out:
        up_write(&ip->i_rw_mutex);
        return error;
}

void gfs2_qa_put(struct gfs2_inode *ip)
{
        down_write(&ip->i_rw_mutex);
        if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
                kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
                ip->i_qadata = NULL;
        }
        up_write(&ip->i_rw_mutex);
}

int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_quota_data **qd;
        int error;

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return 0;

        error = gfs2_qa_get(ip);
        if (error)
                return error;

        qd = ip->i_qadata->qa_qd;

        if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
            gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
                error = -EIO;
                goto out;
        }

        error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
        if (error)
                goto out_unhold;
        ip->i_qadata->qa_qd_num++;
        qd++;

        error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
        if (error)
                goto out_unhold;
        ip->i_qadata->qa_qd_num++;
        qd++;

        if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
            !uid_eq(uid, ip->i_inode.i_uid)) {
                error = qdsb_get(sdp, make_kqid_uid(uid), qd);
                if (error)
                        goto out_unhold;
                ip->i_qadata->qa_qd_num++;
                qd++;
        }

        if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
            !gid_eq(gid, ip->i_inode.i_gid)) {
                error = qdsb_get(sdp, make_kqid_gid(gid), qd);
                if (error)
                        goto out_unhold;
                ip->i_qadata->qa_qd_num++;
                qd++;
        }

out_unhold:
        if (error)
                gfs2_quota_unhold(ip);
out:
        return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        u32 x;

        if (ip->i_qadata == NULL)
                return;

        gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

        for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
                qdsb_put(ip->i_qadata->qa_qd[x]);
                ip->i_qadata->qa_qd[x] = NULL;
        }
        ip->i_qadata->qa_qd_num = 0;
        gfs2_qa_put(ip);
}

static int sort_qd(const void *a, const void *b)
{
        const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
        const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

        if (qid_lt(qd_a->qd_id, qd_b->qd_id))
                return -1;
        if (qid_lt(qd_b->qd_id, qd_a->qd_id))
                return 1;
        return 0;
}

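/*
 * Apply a signed block-count delta to this qd's record in the per-node
 * quota change file. The first change away from zero takes qd and slot
 * references and sets QDF_CHANGE; a change that brings the accumulated
 * delta back to zero drops them again.
 */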
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        struct gfs2_quota_change *qc = qd->qd_bh_qc;
        s64 x;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

        if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
                qc->qc_change = 0;
                qc->qc_flags = 0;
                if (qd->qd_id.type == USRQUOTA)
                        qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
                qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
        }

        x = be64_to_cpu(qc->qc_change) + change;
        qc->qc_change = cpu_to_be64(x);

        spin_lock(&qd_lock);
        qd->qd_change = x;
        spin_unlock(&qd_lock);

        if (!x) {
                gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
                clear_bit(QDF_CHANGE, &qd->qd_flags);
                qc->qc_flags = 0;
                qc->qc_id = 0;
                slot_put(qd);
                qd_put(qd);
        } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
                qd_hold(qd);
                slot_hold(qd);
        }

        if (change < 0) /* Reset quiet flag if we freed some blocks */
                clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
        mutex_unlock(&sdp->sd_quota_mutex);
}

static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
                                  unsigned off, void *buf, unsigned bytes)
{
        struct inode *inode = &ip->i_inode;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        struct buffer_head *bh;
        void *kaddr;
        u64 blk;
        unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
        unsigned to_write = bytes, pg_off = off;
        int done = 0;

        blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
        boff = off % bsize;

        page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page)
                return -ENOMEM;
        if (!page_has_buffers(page))
                create_empty_buffers(page, bsize, 0);

        bh = page_buffers(page);
        while (!done) {
                /* Find the beginning block within the page */
                if (pg_off >= ((bnum * bsize) + bsize)) {
                        bh = bh->b_this_page;
                        bnum++;
                        blk++;
                        continue;
                }
                if (!buffer_mapped(bh)) {
                        gfs2_block_map(inode, blk, bh, 1);
                        if (!buffer_mapped(bh))
                                goto unlock_out;
                        /* If it's a newly allocated disk block, zero it */
                        if (buffer_new(bh))
                                zero_user(page, bnum * bsize, bh->b_size);
                }
                if (PageUptodate(page))
                        set_buffer_uptodate(bh);
                if (!buffer_uptodate(bh)) {
                        ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                goto unlock_out;
                }
                if (gfs2_is_jdata(ip))
                        gfs2_trans_add_data(ip->i_gl, bh);
                else
                        gfs2_ordered_add_inode(ip);

                /* If we need to write to the next block as well */
                if (to_write > (bsize - boff)) {
                        pg_off += (bsize - boff);
                        to_write -= (bsize - boff);
                        boff = pg_off % bsize;
                        continue;
                }
                done = 1;
        }

        /* Write to the page, now that we have setup the buffer(s) */
        kaddr = kmap_atomic(page);
        memcpy(kaddr + off, buf, bytes);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);
        unlock_page(page);
        put_page(page);

        return 0;

unlock_out:
        unlock_page(page);
        put_page(page);
        return -EIO;
}

static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
                                 loff_t loc)
{
        unsigned long pg_beg;
        unsigned pg_off, nbytes, overflow = 0;
        int pg_oflow = 0, error;
        void *ptr;

        nbytes = sizeof(struct gfs2_quota);

        pg_beg = loc >> PAGE_SHIFT;
        pg_off = offset_in_page(loc);

        /* If the quota straddles a page boundary, split the write in two */
        if ((pg_off + nbytes) > PAGE_SIZE) {
                pg_oflow = 1;
                overflow = (pg_off + nbytes) - PAGE_SIZE;
        }

        ptr = qp;
        error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
                                       nbytes - overflow);
        /* If there's an overflow, write the remaining bytes to the next page */
        if (!error && pg_oflow)
                error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
                                               ptr + nbytes - overflow,
                                               overflow);
        return error;
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                             s64 change, struct gfs2_quota_data *qd,
                             struct qc_dqblk *fdq)
{
        struct inode *inode = &ip->i_inode;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_quota q;
        int err;
        u64 size;

        if (gfs2_is_stuffed(ip)) {
                err = gfs2_unstuff_dinode(ip);
                if (err)
                        return err;
        }

        memset(&q, 0, sizeof(struct gfs2_quota));
        err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
        if (err < 0)
                return err;

        loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
        err = -EIO;
        be64_add_cpu(&q.qu_value, change);
        if (((s64)be64_to_cpu(q.qu_value)) < 0)
                q.qu_value = 0; /* Never go negative on quota usage */
        qd->qd_qb.qb_value = q.qu_value;
        if (fdq) {
                if (fdq->d_fieldmask & QC_SPC_SOFT) {
                        q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
                        qd->qd_qb.qb_warn = q.qu_warn;
                }
                if (fdq->d_fieldmask & QC_SPC_HARD) {
                        q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
                        qd->qd_qb.qb_limit = q.qu_limit;
                }
                if (fdq->d_fieldmask & QC_SPACE) {
                        q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
                        qd->qd_qb.qb_value = q.qu_value;
                }
        }

        err = gfs2_write_disk_quota(ip, &q, loc);
        if (!err) {
                size = loc + sizeof(struct gfs2_quota);
                if (size > inode->i_size)
                        i_size_write(inode, size);
                inode->i_mtime = inode->i_atime = current_time(inode);
                mark_inode_dirty(inode);
                set_bit(QDF_REFRESH, &qd->qd_flags);
        }

        return err;
}

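/*
 * Write a batch of quota changes back to the quota file: take each quota
 * glock exclusively, reserve whatever blocks the writes may need, and then,
 * in a single transaction, fold each qd's accumulated local change into its
 * record in the quota file while backing the change out of the per-node
 * change file via do_qc().
 */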
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
        struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_alloc_parms ap = { .aflags = 0, };
        unsigned int data_blocks, ind_blocks;
        struct gfs2_holder *ghs, i_gh;
        unsigned int qx, x;
        struct gfs2_quota_data *qd;
        unsigned reserved;
        loff_t offset;
        unsigned int nalloc = 0, blocks;
        int error;

        error = gfs2_qa_get(ip);
        if (error)
                return error;

        gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                               &data_blocks, &ind_blocks);

        ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
        if (!ghs) {
                error = -ENOMEM;
                goto out;
        }

        sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
        inode_lock(&ip->i_inode);
        for (qx = 0; qx < num_qd; qx++) {
                error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &ghs[qx]);
                if (error)
                        goto out_dq;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                goto out_dq;

        for (x = 0; x < num_qd; x++) {
                offset = qd2offset(qda[x]);
                if (gfs2_write_alloc_required(ip, offset,
                                              sizeof(struct gfs2_quota)))
                        nalloc++;
        }

        /*
         * 1 blk for unstuffing inode if stuffed. We add this extra
         * block to the reservation unconditionally. If the inode
         * doesn't need unstuffing, the block will be released to the
         * rgrp since it won't be allocated during the transaction
         */
        /* +3 in the end for unstuffing block, inode size update block
         * and another block in case quota straddles page boundary and
         * two blocks need to be updated instead of 1 */
        blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

        reserved = 1 + (nalloc * (data_blocks + ind_blocks));
        ap.target = reserved;
        error = gfs2_inplace_reserve(ip, &ap);
        if (error)
                goto out_alloc;

        if (nalloc)
                blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
                goto out_ipres;

        for (x = 0; x < num_qd; x++) {
                qd = qda[x];
                offset = qd2offset(qd);
                error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
                if (error)
                        goto out_end_trans;

                do_qc(qd, -qd->qd_change_sync);
                set_bit(QDF_REFRESH, &qd->qd_flags);
        }

        error = 0;

out_end_trans:
        gfs2_trans_end(sdp);
out_ipres:
        gfs2_inplace_release(ip);
out_alloc:
        gfs2_glock_dq_uninit(&i_gh);
out_dq:
        while (qx--)
                gfs2_glock_dq_uninit(&ghs[qx]);
        inode_unlock(&ip->i_inode);
        kfree(ghs);
        gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
                       GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
out:
        gfs2_qa_put(ip);
        return error;
}

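/*
 * Re-read a qd's record from the quota file and copy the limit, warn and
 * usage values into the glock's lock value block (LVB), so that other
 * nodes can see them without reading the quota file themselves.
 */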
static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_quota q;
        struct gfs2_quota_lvb *qlvb;
        loff_t pos;
        int error;

        memset(&q, 0, sizeof(struct gfs2_quota));
        pos = qd2offset(qd);
        error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
        if (error < 0)
                return error;

        qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
        qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
        qlvb->__pad = 0;
        qlvb->qb_limit = q.qu_limit;
        qlvb->qb_warn = q.qu_warn;
        qlvb->qb_value = q.qu_value;
        qd->qd_qb = *qlvb;

        return 0;
}

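/*
 * Take the quota glock for a qd in the shared state and make sure its
 * cached LVB contents are usable. If a refresh is being forced, or the LVB
 * magic shows it was never populated, the glock is retaken exclusively
 * (with the quota inode glock shared) so that update_qd() can repopulate
 * the LVB, and then we restart in the shared state.
 */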
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
                    struct gfs2_holder *q_gh)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_holder i_gh;
        int error;

restart:
        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
        if (error)
                return error;

        if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
                force_refresh = FORCE;

        qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

        if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
                gfs2_glock_dq_uninit(q_gh);
                error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, q_gh);
                if (error)
                        return error;

                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
                if (error)
                        goto fail;

                error = update_qd(sdp, qd);
                if (error)
                        goto fail_gunlock;

                gfs2_glock_dq_uninit(&i_gh);
                gfs2_glock_dq_uninit(q_gh);
                force_refresh = 0;
                goto restart;
        }

        return 0;

fail_gunlock:
        gfs2_glock_dq_uninit(&i_gh);
fail:
        gfs2_glock_dq_uninit(q_gh);
        return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_quota_data *qd;
        u32 x;
        int error = 0;

        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        error = gfs2_quota_hold(ip, uid, gid);
        if (error)
                return error;

        sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
             sizeof(struct gfs2_quota_data *), sort_qd, NULL);

        for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
                qd = ip->i_qadata->qa_qd[x];
                error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
                if (error)
                        break;
        }

        if (!error)
                set_bit(GIF_QD_LOCKED, &ip->i_flags);
        else {
                while (x--)
                        gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
                gfs2_quota_unhold(ip);
        }

        return error;
}

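/*
 * Heuristic for deciding whether a qd's pending local change warrants an
 * early sync: scale the change by the number of journals (roughly, nodes)
 * and by quota_scale (num/den), and request a sync when
 *
 *     qb_value + qd_change * gfs2_jindex_size(sdp) * num / den >= qb_limit
 *
 * Negative changes (frees) and IDs already at or over their limit never
 * trigger a sync from here.
 */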
static int need_sync(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
        struct gfs2_tune *gt = &sdp->sd_tune;
        s64 value;
        unsigned int num, den;
        int do_sync = 1;

        if (!qd->qd_qb.qb_limit)
                return 0;

        spin_lock(&qd_lock);
        value = qd->qd_change;
        spin_unlock(&qd_lock);

        spin_lock(&gt->gt_spin);
        num = gt->gt_quota_scale_num;
        den = gt->gt_quota_scale_den;
        spin_unlock(&gt->gt_spin);

        if (value < 0)
                do_sync = 0;
        else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
                 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                do_sync = 0;
        else {
                value *= gfs2_jindex_size(sdp) * num;
                value = div_s64(value, den);
                value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
                if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                        do_sync = 0;
        }

        return do_sync;
}

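/*
 * Drop the quota glocks taken by gfs2_quota_lock(). Any qd whose pending
 * change passes the need_sync() heuristic (and survives a recheck under
 * qd_lock) is batched up and written back via do_sync() before the holds
 * are released.
 */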
1118void gfs2_quota_unlock(struct gfs2_inode *ip)
1119{
Steven Whitehouseaabd7c72013-10-04 11:31:05 +01001120 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001121 struct gfs2_quota_data *qda[4];
1122 unsigned int count = 0;
Bob Petersonb58bf402015-07-24 09:45:43 -05001123 u32 x;
Steven Whitehouseaabd7c72013-10-04 11:31:05 +01001124 int found;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001125
1126 if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
Bob Petersonc9cb9e32020-05-05 11:55:03 -05001127 return;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001128
Bob Petersonb54e9a02015-10-26 10:40:28 -05001129 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
David Teiglandb3b94fa2006-01-16 16:50:04 +00001130 struct gfs2_quota_data *qd;
1131 int sync;
1132
Bob Petersonb54e9a02015-10-26 10:40:28 -05001133 qd = ip->i_qadata->qa_qd[x];
David Teiglandb3b94fa2006-01-16 16:50:04 +00001134 sync = need_sync(qd);
1135
Bob Petersonb54e9a02015-10-26 10:40:28 -05001136 gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
Steven Whitehouseaabd7c72013-10-04 11:31:05 +01001137 if (!sync)
1138 continue;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001139
Steven Whitehouse7d808232013-11-01 14:52:08 -04001140 spin_lock(&qd_lock);
Steven Whitehouseaabd7c72013-10-04 11:31:05 +01001141 found = qd_check_sync(sdp, qd, NULL);
Steven Whitehouse7d808232013-11-01 14:52:08 -04001142 spin_unlock(&qd_lock);
Steven Whitehouseaabd7c72013-10-04 11:31:05 +01001143
1144 if (!found)
1145 continue;
1146
1147 gfs2_assert_warn(sdp, qd->qd_change_sync);
1148 if (bh_get(qd)) {
1149 clear_bit(QDF_LOCKED, &qd->qd_flags);
1150 slot_put(qd);
1151 qd_put(qd);
1152 continue;
1153 }
1154
1155 qda[count++] = qd;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001156 }
1157
1158 if (count) {
1159 do_sync(count, qda);
1160 for (x = 0; x < count; x++)
1161 qd_unlock(qda[x]);
1162 }
1163
David Teiglandb3b94fa2006-01-16 16:50:04 +00001164 gfs2_quota_unhold(ip);
1165}
1166
1167#define MAX_LINE 256
1168
1169static int print_message(struct gfs2_quota_data *qd, char *type)
1170{
Bob Peterson15562c42015-03-16 11:52:05 -05001171 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001172
Joe Perches8382e262014-03-06 12:10:46 -08001173 fs_info(sdp, "quota %s for %s %u\n",
1174 type,
Joe Perchesd77d1b52014-03-06 12:10:45 -08001175 (qd->qd_id.type == USRQUOTA) ? "user" : "group",
1176 from_kqid(&init_user_ns, qd->qd_id));
David Teiglandb3b94fa2006-01-16 16:50:04 +00001177
1178 return 0;
1179}
1180
Abhi Das25435e52015-03-18 12:04:37 -05001181/**
1182 * gfs2_quota_check - check if allocating new blocks will exceed quota
1183 * @ip: The inode for which this check is being performed
1184 * @uid: The uid to check against
1185 * @gid: The gid to check against
1186 * @ap: The allocation parameters. ap->target contains the requested
1187 * blocks. ap->min_target, if set, contains the minimum blks
1188 * requested.
1189 *
1190 * Returns: 0 on success.
1191 * min_req = ap->min_target ? ap->min_target : ap->target;
Andreas Gruenbacher243fea42018-10-02 10:22:41 +01001192 * quota must allow at least min_req blks for success and
Abhi Das25435e52015-03-18 12:04:37 -05001193 * ap->allowed is set to the number of blocks allowed
1194 *
1195 * -EDQUOT otherwise, quota violation. ap->allowed is set to number
1196 * of blocks available.
1197 */
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
		     struct gfs2_alloc_parms *ap)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value, warn, limit;
	u32 x;
	int error = 0;

	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lock);

		if (limit > 0 && (limit - value) < ap->allowed)
			ap->allowed = limit - value;
		/* If we can't meet the target */
		if (limit && limit < (value + (s64)ap->target)) {
			/* If no min_target specified or we don't meet
			 * min_target, return -EDQUOT */
			if (!ap->min_target || ap->min_target > ap->allowed) {
				if (!test_and_set_bit(QDF_QMSG_QUIET,
						      &qd->qd_flags)) {
					print_message(qd, "exceeded");
					quota_send_warning(qd->qd_id,
							   sdp->sd_vfs->s_dev,
							   QUOTA_NL_BHARDWARN);
				}
				error = -EDQUOT;
				break;
			}
		} else if (warn && warn < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period)
					 * HZ)) {
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}
	return error;
}

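/*
 * Illustrative sketch only (not part of this file's call graph): how a
 * caller can use the ap->target / ap->min_target contract documented
 * above.  The helper name and the "want"/"min" parameters are made up
 * for the example; the gfs2_quota_check() call itself is real.
 */
static int __maybe_unused example_check_with_fallback(struct gfs2_inode *ip,
						      kuid_t uid, kgid_t gid,
						      u32 want, u32 min)
{
	struct gfs2_alloc_parms ap = { .target = want, .min_target = min };
	int error;

	error = gfs2_quota_check(ip, uid, gid, &ap);
	if (error)
		return error;	/* -EDQUOT: quota cannot cover even "min" */

	/* On success, allocate at most min(want, ap.allowed) blocks. */
	return 0;
}
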
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	u32 x;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
	    gfs2_assert_warn(sdp, change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
				 ip->i_qadata->qa_ref > 0))
		return;
	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];

		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
			do_qc(qd, change);
		}
	}
}

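/*
 * Illustrative sketch only: allocation paths call gfs2_quota_change()
 * with a positive block count and deallocation paths with a negative
 * one, so a matched pair of calls leaves the per-node change record at
 * zero once both have been synced.  The helper is hypothetical.
 */
static void __maybe_unused example_paired_change(struct gfs2_inode *ip,
						 kuid_t uid, kgid_t gid,
						 s64 blocks)
{
	gfs2_quota_change(ip, blocks, uid, gid);	/* blocks allocated */
	gfs2_quota_change(ip, -blocks, uid, gid);	/* blocks freed again */
}
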
int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	mutex_lock(&sdp->sd_quota_sync_mutex);
	sdp->sd_quota_sync_gen++;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	mutex_unlock(&sdp->sd_quota_sync_mutex);
	kfree(qda);

	return error;
}

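/*
 * Illustrative sketch only: gfs2_quota_sync() is also wired up as the
 * ->quota_sync quotactl operation at the bottom of this file, so a full
 * sync can be forced given any GFS2 super block.  The type argument is
 * not used by the GFS2 implementation; 0 is passed here by convention.
 */
static int __maybe_unused example_force_quota_sync(struct super_block *sb)
{
	return gfs2_quota_sync(sb, 0);
}
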
int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}

int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	unsigned int hash;
	unsigned int bm_size;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
	bm_size *= sizeof(unsigned long);
	error = -ENOMEM;
	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
	if (sdp->sd_quota_bitmap == NULL)
		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
						 __GFP_ZERO);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		const struct gfs2_quota_change *qc;
		unsigned int y;

		if (!extlen) {
			extlen = 32;
			error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_data *qd;
			s64 qc_change = be64_to_cpu(qc->qc_change);
			u32 qc_flags = be32_to_cpu(qc->qc_flags);
			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
						USRQUOTA : GRPQUOTA;
			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
						      be32_to_cpu(qc->qc_id));
			qc++;
			if (!qc_change)
				continue;

			hash = gfs2_qd_hash(sdp, qc_id);
			qd = qd_alloc(hash, sdp, qc_id);
			if (qd == NULL) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lock);
			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lock);

			spin_lock_bucket(hash);
			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
			spin_unlock_bucket(hash);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

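/*
 * Worked example of the bitmap sizing done in gfs2_quota_init() above,
 * under the (hypothetical) assumption of 64-bit unsigned longs: one bit
 * per quota slot, rounded up to whole longs.  For 1000 slots this gives
 * DIV_ROUND_UP(1000, 64) = 16 longs, i.e. 128 bytes.
 */
static unsigned int __maybe_unused example_quota_bitmap_bytes(unsigned int slots)
{
	unsigned int bm_size;

	bm_size = DIV_ROUND_UP(slots, 8 * sizeof(unsigned long));
	return bm_size * sizeof(unsigned long);
}
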
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;

	spin_lock(&qd_lock);
	while (!list_empty(head)) {
		qd = list_last_entry(head, struct gfs2_quota_data, qd_list);

		list_del(&qd->qd_list);

		/* Also remove if this qd exists in the reclaim list */
		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);

		spin_lock(&qd_lock);
	}
	spin_unlock(&qd_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	kvfree(sdp->sd_quota_bitmap);
	sdp->sd_quota_bitmap = NULL;
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!gfs2_withdrawn(sdp)) {
		if (!cmpxchg(&sdp->sd_log_error, 0, error))
			fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
		wake_up(&sdp->sd_logd_waitq);
	}
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

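/*
 * Illustrative trace of the countdown in quotad_check_timeo(): the same
 * arithmetic, isolated.  With a quantum of 60 seconds, *timeo starts at
 * 60 * HZ jiffies; each quotad wakeup subtracts the time slept until
 * the period has elapsed, at which point the sync callback runs and the
 * timer is re-armed.  The helper and its parameters are hypothetical.
 */
static void __maybe_unused example_timeo_countdown(unsigned long *timeo,
						   unsigned long slept,
						   unsigned int quantum)
{
	if (slept >= *timeo)
		*timeo = quantum * HZ;	/* period over: sync, then re-arm */
	else
		*timeo -= slept;	/* still counting down */
}
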
static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_first_entry(&sdp->sd_trunc_list,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @data: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		if (gfs2_withdrawn(sdp))
			goto bypass;
		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		} else {
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);
		}

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

bypass:
		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(state, 0, sizeof(*state));

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
		fallthrough;
	case GFS2_QUOTA_ACCOUNT:
		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		break;
	case GFS2_QUOTA_OFF:
		break;
	}
	if (sdp->sd_quota_inode) {
		state->s_state[USRQUOTA].ino =
					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
	}
	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
	return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(*fdq));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

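/*
 * Worked example of the unit conversion in gfs2_get_dqblk(): the LVB
 * stores limits in filesystem blocks while struct qc_dqblk expects
 * bytes, hence the shift by sb_bsize_shift.  Assuming (hypothetically)
 * 4KiB blocks, sb_bsize_shift is 12, so 25 blocks << 12 = 102400 bytes.
 */
static u64 __maybe_unused example_blocks_to_bytes(u64 blocks,
						  unsigned int bsize_shift)
{
	return blocks << bsize_shift;
}
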
/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)

static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = gfs2_qa_get(ip);
	if (error)
		goto out_put;

	inode_lock(&ip->i_inode);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= QC_SPC_SOFT;

	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= QC_SPC_HARD;

	if ((fdq->d_fieldmask & QC_SPACE) &&
	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= QC_SPACE;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		ap.target = blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
	if (!error)
		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	gfs2_qa_put(ip);
	inode_unlock(&ip->i_inode);
out_put:
	qd_put(qd);
	return error;
}

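/*
 * Illustrative sketch of the no-op filtering in gfs2_set_dqblk() above:
 * a field whose requested value, converted back to blocks, matches the
 * current on-disk value has its (known-set) bit XORed out of
 * d_fieldmask, so an unchanged request never starts a transaction.
 * Shown here for the hard limit only; the helper is hypothetical.
 */
static void __maybe_unused example_drop_unchanged_hard_limit(struct qc_dqblk *fdq,
							     u64 cur_blocks,
							     unsigned int shift)
{
	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
	    (fdq->d_spc_hardlimit >> shift) == cur_blocks)
		fdq->d_fieldmask ^= QC_SPC_HARD;
}
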
const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync	= gfs2_quota_sync,
	.get_state	= gfs2_quota_get_state,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};

void __init gfs2_quota_hash_init(void)
{
	unsigned int i;

	for (i = 0; i < GFS2_QD_HASH_SIZE; i++)
		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
}