/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the
 * frequency of quota file syncs increases as the user moves closer to their
 * limit.  The more frequent the syncs, the more accurate the quota
 * enforcement, but that means that there is more contention between the nodes
 * for the quota file.  The default value is one.  This sets the maximum
 * theoretical quota overrun (with an infinite number of nodes, each with
 * infinite bandwidth) to twice the user's limit.  (In practice, the maximum
 * overrun you see should be much less.)  A "quota_scale" number greater than
 * one makes quota syncs more frequent and reduces the maximum overrun.
 * Numbers less than one (but greater than zero) make quota syncs less
 * frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */

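/*
 * Illustrative numbers for the "quota_scale" knob described above (a sketch
 * only; the exact overrun depends on node count and sync timing): if the
 * theoretical worst-case overrun behaves like
 *
 *	overrun <= limit / quota_scale
 *
 * then the default quota_scale of 1 allows usage to reach twice the limit,
 * while quota_scale = 2 would cap it at 1.5x the limit, at the price of more
 * frequent quota file syncs.
 */
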
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags; /* GFS2_QCF_... */
	u32 qc_id;
};

static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);

int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;
	int nr_to_scan = sc->nr_to_scan;

	if (nr_to_scan == 0)
		goto out;

	if (!(sc->gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&qd_lru_lock);
	while (nr_to_scan && !list_empty(&qd_lru_list)) {
		qd = list_entry(qd_lru_list.next,
				struct gfs2_quota_data, qd_reclaim);
		sdp = qd->qd_gl->gl_sbd;

		/* Free from the filesystem-specific list */
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		list_del_init(&qd->qd_reclaim);
		atomic_dec(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
		kmem_cache_free(gfs2_quotad_cachep, qd);
		spin_lock(&qd_lru_lock);
		nr_to_scan--;
	}
	spin_unlock(&qd_lru_lock);

out:
	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
}

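/*
 * A sketch of how the shrinker above is wired up (the registration itself
 * lives outside this file, in the GFS2 module init code; shown here only for
 * orientation, using the old single-callback shrinker API this function
 * implements):
 *
 *	static struct shrinker qd_shrinker = {
 *		.shrink = gfs2_shrink_qd_memory,
 *		.seeks = DEFAULT_SEEKS,
 *	};
 *	register_shrinker(&qd_shrinker);
 *
 * Under that API, returning -1 tells the VM that reclaim cannot make progress
 * right now (no __GFP_FS), and the normal return value reports the cache size
 * scaled by vfs_cache_pressure.
 */
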
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

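/*
 * Worked example for the offset calculation above (a sketch assuming the
 * on-disk struct gfs2_quota is 88 bytes: three __be64 fields plus 64 reserved
 * bytes): user and group records for the same ID sit next to each other in
 * the quota file:
 *
 *	user  ID 0 -> offset   0
 *	group ID 0 -> offset  88
 *	user  ID 1 -> offset 176
 *	group ID 1 -> offset 264
 */
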
static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return -ENOMEM;

	atomic_set(&qd->qd_count, 1);
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_reclaim);

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return error;
}

static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&qd_lru_lock);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				if (!atomic_read(&qd->qd_count) &&
				    !list_empty(&qd->qd_reclaim)) {
					/* Remove it from reclaim list */
					list_del_init(&qd->qd_reclaim);
					atomic_dec(&qd_lru_count);
				}
				atomic_inc(&qd->qd_count);
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&qd_lru_lock);

		if (qd) {
			if (new_qd) {
				gfs2_glock_put(new_qd->qd_gl);
				kmem_cache_free(gfs2_quotad_cachep, new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}

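/*
 * Note on the loop above: this is the usual "lookup, else allocate outside
 * the lock and retry" pattern.  qd_alloc() can sleep, so the allocation is
 * done after qd_lru_lock has been dropped; the next pass of the loop either
 * inserts new_qd or finds an entry that raced in first, in which case new_qd
 * is freed again.
 */
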
static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
		/* Add to the reclaim list */
		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
		atomic_inc(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
	}
}

static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&qd_lru_lock);

	if (qd->qd_slot_count++) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&qd_lru_lock);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&qd_lru_lock);
	return -ENOSPC;
}

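/*
 * Worked example for the slot arithmetic above: each bitmap chunk is one
 * page, so with PAGE_SIZE == 4096 a chunk covers 8 * 4096 = 32768 slots.
 * Chunk c = 1, byte o = 2, bit b = 5 therefore yields slot
 * 1 * 32768 + 2 * 8 + 5 = 32789.
 */
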
static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&qd_lru_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&qd_lru_lock);
}

static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
		atomic_inc(&qd->qd_count);
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;

		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lru_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

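/*
 * At most four quota entries are attached above: the inode's current uid and
 * gid always, plus the target uid and/or gid while an ownership change is in
 * flight.  That bound is why callers such as gfs2_quota_unlock() size their
 * local arrays at four entries.
 */
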
void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}

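/*
 * Keeping these arrays sorted (user entries first, then ascending ID) gives
 * every node the same ordering, so the loops in do_sync() and
 * gfs2_quota_lock() always acquire multiple quota glocks in a consistent
 * order.  That is the usual way of avoiding ABBA deadlocks when more than
 * one quota glock must be held at once.
 */
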
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lru_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lru_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr, *ptr;
	struct gfs2_quota q, *qp;
	int err, nbytes;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	err = -EIO;
	qp = &q;
	qp->qu_value = be64_to_cpu(qp->qu_value);
	qp->qu_value += change;
	qp->qu_value = cpu_to_be64(qp->qu_value);
	qd->qd_qb.qb_value = qp->qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_warn = qp->qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_limit = qp->qu_limit;
		}
		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
			qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_value = qp->qu_value;
		}
	}

	/* Write the quota into the quota file on disk */
	ptr = qp;
	nbytes = sizeof(struct gfs2_quota);
get_a_page:
	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock_out;
		/* If it's a newly allocated disk block for quota, zero it */
		if (buffer_new(bh))
			zero_user(page, pos - blocksize, bh->b_size);
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ | REQ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock_out;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
		nbytes = PAGE_CACHE_SIZE - offset;
	memcpy(kaddr + offset, ptr, nbytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	unlock_page(page);
	page_cache_release(page);

	/* If quota straddles page boundary, we need to update the rest of the
	 * quota at the beginning of the next page */
	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
		ptr = ptr + nbytes;
		nbytes = sizeof(struct gfs2_quota) - nbytes;
		offset = 0;
		index++;
		goto get_a_page;
	}

	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size)
		i_size_write(inode, size);
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;

unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

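/*
 * Worked example for the page-straddling case above (a sketch, again
 * assuming an 88-byte struct gfs2_quota and 4096-byte pages): the record at
 * file offset 4048 ends at byte 4136, so the first 48 bytes land at the end
 * of page 0 and the remaining 40 bytes are written at offset 0 of page 1 by
 * the second pass through get_a_page.
 */
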
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	al = gfs2_alloc_get(ip);
	if (!al) {
		error = -ENOMEM;
		goto out_gunlock;
	}
	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	al->al_requested = 1;
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	if (nalloc)
		al->al_requested += nalloc * (data_blocks + ind_blocks);
	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_alloc_put(ip);
out_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

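/*
 * This is where the per-ID LVB caching mentioned at the top of the file pays
 * off: as long as the LVB attached to the quota glock holds a valid copy
 * (qb_magic matches), a shared glock is sufficient and the quota file itself
 * is never read.  Only a forced refresh, or a stale LVB, takes the exclusive
 * path through update_qd().
 */
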
int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;
	int error = 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		int force = NO_FORCE;
		qd = al->al_qd[x];
		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
			force = FORCE;
		error = do_glock(qd, force, &al->al_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lru_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lru_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

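/*
 * Worked example for the scaling above: the local unsynced change is
 * multiplied by the number of journals (an upper bound on how many nodes may
 * hold unsynced changes) and by quota_scale (num/den), then added to the
 * cached global value.  With 4 journals, scale 1/1, a cached value of 900
 * blocks, a limit of 1000 and a local change of 30:
 *
 *	900 + 30 * 4 * 1 / 1 = 1020 >= 1000  =>  sync now
 *
 * With a local change of 20 the estimate stays below the limit and the sync
 * is deferred.
 */
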
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}

int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lru_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lru_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
					   USRQUOTA : GRPQUOTA, qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);

			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						gt_quota_warn_period) * HZ)) {
			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
					   USRQUOTA : GRPQUOTA, qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
		}
	}
}

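/*
 * Sketch of the typical caller sequence for the API above (the real call
 * sites live in the allocation paths elsewhere in GFS2; the arguments here
 * are illustrative):
 *
 *	error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
 *	if (!error) {
 *		error = gfs2_quota_check(ip, ip->i_inode.i_uid,
 *					 ip->i_inode.i_gid);
 *		if (!error) {
 *			... allocate nblocks, then record the change ...
 *			gfs2_quota_change(ip, nblocks, ip->i_inode.i_uid,
 *					  ip->i_inode.i_gid);
 *		}
 *		gfs2_quota_unlock(ip);
 *	}
 */
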
Christoph Hellwig5fb324a2010-02-16 03:44:52 -05001121int gfs2_quota_sync(struct super_block *sb, int type, int wait)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001122{
Steven Whitehouse8c42d632009-09-11 14:36:44 +01001123 struct gfs2_sbd *sdp = sb->s_fs_info;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001124 struct gfs2_quota_data **qda;
1125 unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
1126 unsigned int num_qd;
1127 unsigned int x;
1128 int error = 0;
1129
1130 sdp->sd_quota_sync_gen++;
1131
1132 qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1133 if (!qda)
1134 return -ENOMEM;
1135
1136 do {
1137 num_qd = 0;
1138
1139 for (;;) {
1140 error = qd_fish(sdp, qda + num_qd);
1141 if (error || !qda[num_qd])
1142 break;
1143 if (++num_qd == max_qd)
1144 break;
1145 }
1146
1147 if (num_qd) {
1148 if (!error)
1149 error = do_sync(num_qd, qda);
1150 if (!error)
1151 for (x = 0; x < num_qd; x++)
1152 qda[x]->qd_sync_gen =
1153 sdp->sd_quota_sync_gen;
1154
1155 for (x = 0; x < num_qd; x++)
1156 qd_unlock(qda[x]);
1157 }
1158 } while (!error && num_qd == max_qd);
1159
1160 kfree(qda);
1161
1162 return error;
1163}
1164
Christoph Hellwig5fb324a2010-02-16 03:44:52 -05001165static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
1166{
1167 return gfs2_quota_sync(sb, type, 0);
1168}
1169
Steven Whitehousecd915492006-09-04 12:49:07 -04001170int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001171{
1172 struct gfs2_quota_data *qd;
1173 struct gfs2_holder q_gh;
1174 int error;
1175
Steven Whitehouse6a6ada82009-09-15 16:30:38 +01001176 error = qd_get(sdp, user, id, &qd);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001177 if (error)
1178 return error;
1179
1180 error = do_glock(qd, FORCE, &q_gh);
1181 if (!error)
1182 gfs2_glock_dq_uninit(&q_gh);
1183
1184 qd_put(qd);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001185 return error;
1186}
1187
Steven Whitehousebb8d8a62007-06-01 14:11:58 +01001188static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
1189{
1190 const struct gfs2_quota_change *str = buf;
1191
1192 qc->qc_change = be64_to_cpu(str->qc_change);
1193 qc->qc_flags = be32_to_cpu(str->qc_flags);
1194 qc->qc_id = be32_to_cpu(str->qc_id);
1195}
1196
David Teiglandb3b94fa2006-01-16 16:50:04 +00001197int gfs2_quota_init(struct gfs2_sbd *sdp)
1198{
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04001199 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
Steven Whitehousea2e0f792010-08-11 09:53:11 +01001200 u64 size = i_size_read(sdp->sd_qc_inode);
1201 unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001202 unsigned int x, slot = 0;
1203 unsigned int found = 0;
Steven Whitehousecd915492006-09-04 12:49:07 -04001204 u64 dblock;
1205 u32 extlen = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001206 int error;
1207
Steven Whitehousea2e0f792010-08-11 09:53:11 +01001208 if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
Steven Whitehouse907b9bc2006-09-25 09:26:04 -04001209 return -EIO;
Steven Whitehousea2e0f792010-08-11 09:53:11 +01001210
David Teiglandb3b94fa2006-01-16 16:50:04 +00001211 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
Steven Whitehouse5c676f62006-02-27 17:23:27 -05001212 sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001213
1214 error = -ENOMEM;
1215
1216 sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
Josef Bacik16c5f062008-04-09 09:33:41 -04001217 sizeof(unsigned char *), GFP_NOFS);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001218 if (!sdp->sd_quota_bitmap)
1219 return error;
1220
1221 for (x = 0; x < sdp->sd_quota_chunks; x++) {
Josef Bacik16c5f062008-04-09 09:33:41 -04001222 sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001223 if (!sdp->sd_quota_bitmap[x])
1224 goto fail;
1225 }
1226
1227 for (x = 0; x < blocks; x++) {
1228 struct buffer_head *bh;
1229 unsigned int y;
1230
1231 if (!extlen) {
1232 int new = 0;
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04001233 error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001234 if (error)
1235 goto fail;
1236 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001237 error = -EIO;
Steven Whitehouse7276b3b2006-09-21 17:05:23 -04001238 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1239 if (!bh)
1240 goto fail;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001241 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1242 brelse(bh);
1243 goto fail;
1244 }
1245
Steven Whitehouse7276b3b2006-09-21 17:05:23 -04001246 for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001247 y++, slot++) {
Al Virob62f9632006-10-13 23:46:46 -04001248 struct gfs2_quota_change_host qc;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001249 struct gfs2_quota_data *qd;
1250
1251 gfs2_quota_change_in(&qc, bh->b_data +
1252 sizeof(struct gfs2_meta_header) +
1253 y * sizeof(struct gfs2_quota_change));
1254 if (!qc.qc_change)
1255 continue;
1256
1257 error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
1258 qc.qc_id, &qd);
1259 if (error) {
1260 brelse(bh);
1261 goto fail;
1262 }
1263
1264 set_bit(QDF_CHANGE, &qd->qd_flags);
1265 qd->qd_change = qc.qc_change;
1266 qd->qd_slot = slot;
1267 qd->qd_slot_count = 1;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001268
Abhijith Das0a7ab792009-01-07 16:03:37 -06001269 spin_lock(&qd_lru_lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001270 gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
1271 list_add(&qd->qd_list, &sdp->sd_quota_list);
1272 atomic_inc(&sdp->sd_quota_count);
Abhijith Das0a7ab792009-01-07 16:03:37 -06001273 spin_unlock(&qd_lru_lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001274
1275 found++;
1276 }
1277
1278 brelse(bh);
1279 dblock++;
1280 extlen--;
1281 }
1282
1283 if (found)
1284 fs_info(sdp, "found %u quota changes\n", found);
1285
1286 return 0;
1287
Steven Whitehousea91ea692006-09-04 12:04:26 -04001288fail:
David Teiglandb3b94fa2006-01-16 16:50:04 +00001289 gfs2_quota_cleanup(sdp);
1290 return error;
1291}
1292
David Teiglandb3b94fa2006-01-16 16:50:04 +00001293void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1294{
1295 struct list_head *head = &sdp->sd_quota_list;
1296 struct gfs2_quota_data *qd;
1297 unsigned int x;
1298
Abhijith Das0a7ab792009-01-07 16:03:37 -06001299 spin_lock(&qd_lru_lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001300 while (!list_empty(head)) {
1301 qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
1302
Abhijith Das0a7ab792009-01-07 16:03:37 -06001303 if (atomic_read(&qd->qd_count) > 1 ||
1304 (atomic_read(&qd->qd_count) &&
1305 !test_bit(QDF_CHANGE, &qd->qd_flags))) {
Abhijith Das0a7ab792009-01-07 16:03:37 -06001306 list_move(&qd->qd_list, head);
1307 spin_unlock(&qd_lru_lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001308 schedule();
Abhijith Das0a7ab792009-01-07 16:03:37 -06001309 spin_lock(&qd_lru_lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001310 continue;
1311 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001312
Abhijith Das0a7ab792009-01-07 16:03:37 -06001313 list_del(&qd->qd_list);
1314 /* Also remove if this qd exists in the reclaim list */
1315 if (!list_empty(&qd->qd_reclaim)) {
1316 list_del_init(&qd->qd_reclaim);
1317 atomic_dec(&qd_lru_count);
1318 }
1319 atomic_dec(&sdp->sd_quota_count);
1320 spin_unlock(&qd_lru_lock);
1321
1322 if (!atomic_read(&qd->qd_count)) {
David Teiglandb3b94fa2006-01-16 16:50:04 +00001323 gfs2_assert_warn(sdp, !qd->qd_change);
1324 gfs2_assert_warn(sdp, !qd->qd_slot_count);
1325 } else
1326 gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
1327 gfs2_assert_warn(sdp, !qd->qd_bh_count);
1328
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001329 gfs2_glock_put(qd->qd_gl);
Steven Whitehouse37b2c832008-11-17 14:25:37 +00001330 kmem_cache_free(gfs2_quotad_cachep, qd);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001331
Abhijith Das0a7ab792009-01-07 16:03:37 -06001332 spin_lock(&qd_lru_lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001333 }
Abhijith Das0a7ab792009-01-07 16:03:37 -06001334 spin_unlock(&qd_lru_lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001335
1336 gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1337
1338 if (sdp->sd_quota_bitmap) {
1339 for (x = 0; x < sdp->sd_quota_chunks; x++)
1340 kfree(sdp->sd_quota_bitmap[x]);
1341 kfree(sdp->sd_quota_bitmap);
1342 }
1343}
1344
Steven Whitehouse37b2c832008-11-17 14:25:37 +00001345static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1346{
1347 if (error == 0 || error == -EROFS)
1348 return;
1349 if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
1350 fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1351}
1352
static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

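/**
 * quotad_check_trunc_list - Finish truncates deferred to quotad
 * @sdp: Pointer to GFS2 superblock
 *
 * Drains sd_trunc_list under sd_trunc_lock, completing the truncate
 * of each partially truncated inode queued on the list.
 */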
static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

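/**
 * gfs2_wake_up_statfs - Ask quotad to sync the master statfs file
 * @sdp: Pointer to GFS2 superblock
 *
 * The flag is set before the wake up so that quotad either sees it
 * before sleeping or is woken to process it.
 */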
void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @data: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		} else {
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);
		}

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		if (freezing(current))
			refrigerator();
		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

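/**
 * gfs2_quota_get_xstate - Report the overall quota state (XFS style)
 * @sb: The VFS superblock
 * @fqs: The structure to fill in
 *
 * Reports whether quotas are being accounted and/or enforced, and where
 * the quota file lives; user and group quotas share one file in GFS2.
 */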
static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
		break;
	case GFS2_QUOTA_OFF:
		break;
	}

	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}
	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
	return 0;
}

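/**
 * gfs2_get_dqblk - Get the current usage and limits for a single ID
 * @sb: The VFS superblock
 * @type: USRQUOTA or GRPQUOTA
 * @id: The user or group ID
 * @fdq: The structure to fill in
 *
 * The values come from the per-ID lock value block (LVB), refreshed via
 * do_glock(). GFS2 stores sizes in filesystem blocks while the XFS
 * interface expects 512-byte basic blocks, hence the sd_fsb2bb_shift
 * conversions.
 */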
static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(struct fs_disk_quota));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if (type == USRQUOTA)
		type = QUOTA_USER;
	else if (type == GRPQUOTA)
		type = QUOTA_GROUP;
	else
		return -EINVAL;

	error = qd_get(sdp, type, id, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
	fdq->d_id = id;
	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK	(FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)

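/**
 * gfs2_set_dqblk - Update the quota limits for a single ID
 * @sb: The VFS superblock
 * @type: USRQUOTA or GRPQUOTA
 * @id: The user or group ID
 * @fdq: The new limits
 *
 * Takes the quota glock and the quota inode glock exclusively, strips
 * any fields which would not change the on-disk values, reserves space
 * if the write requires an allocation, and applies the change inside a
 * single transaction.
 */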
static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	struct gfs2_alloc *al;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	switch (type) {
	case USRQUOTA:
		type = QUOTA_USER;
		if (fdq->d_flags != FS_USER_QUOTA)
			return -EINVAL;
		break;
	case GRPQUOTA:
		type = QUOTA_GROUP;
		if (fdq->d_flags != FS_GROUP_QUOTA)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;
	if (fdq->d_id != id)
		return -EINVAL;

	error = qd_get(sdp, type, id, &qd);
	if (error)
		return error;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_put;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for an existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= FS_DQ_BSOFT;

	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= FS_DQ_BHARD;

	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= FS_DQ_BCOUNT;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		al = gfs2_alloc_get(ip);
		if (al == NULL) {
			error = -ENOMEM;
			goto out_i;
		}
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = al->al_requested = 1 + data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_alloc;
		blocks += gfs2_rg_blocks(ip);
	}

	/* Some quotas span block boundaries and can update two blocks, so
	   add an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_alloc:
		gfs2_alloc_put(ip);
	}
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_put:
	mutex_unlock(&ip->i_inode.i_mutex);
	qd_put(qd);
	return error;
}

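/* The XFS-style quotactl operations supported by GFS2, exported to the
   VFS via sb->s_qcop */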
const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync	= gfs2_quota_sync,
	.get_xstate	= gfs2_quota_get_xstate,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};