blob: be2fc26029e4548e7e16063e30a6777249e675af [file] [log] [blame]
David Teiglandb3b94fa2006-01-16 16:50:04 +00001/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
Bob Petersonfe6c9912008-01-28 11:13:02 -06003 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
David Teiglandb3b94fa2006-01-16 16:50:04 +00004 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
Steven Whitehousee9fc2aa2006-09-01 11:05:15 -04007 * of the GNU General Public License version 2.
David Teiglandb3b94fa2006-01-16 16:50:04 +00008 */
9
Joe Perchesd77d1b52014-03-06 12:10:45 -080010#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
David Teiglandb3b94fa2006-01-16 16:50:04 +000012#include <linux/slab.h>
13#include <linux/spinlock.h>
14#include <linux/completion.h>
15#include <linux/buffer_head.h>
Steven Whitehousef42faf42006-01-30 18:34:10 +000016#include <linux/fs.h>
Steven Whitehouse5c676f62006-02-27 17:23:27 -050017#include <linux/gfs2_ondisk.h>
Bob Peterson1f466a42008-03-10 18:17:47 -050018#include <linux/prefetch.h>
Steven Whitehousef15ab562009-02-09 09:25:01 +000019#include <linux/blkdev.h>
Bob Peterson7c9ca622011-08-31 09:53:19 +010020#include <linux/rbtree.h>
Steven Whitehouse9dbe9612012-10-31 10:37:10 +000021#include <linux/random.h>
David Teiglandb3b94fa2006-01-16 16:50:04 +000022
23#include "gfs2.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050024#include "incore.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000025#include "glock.h"
26#include "glops.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000027#include "lops.h"
28#include "meta_io.h"
29#include "quota.h"
30#include "rgrp.h"
31#include "super.h"
32#include "trans.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050033#include "util.h"
Benjamin Marzinski172e0452007-03-23 14:51:56 -060034#include "log.h"
Steven Whitehousec8cdf472007-06-08 10:05:33 +010035#include "inode.h"
Steven Whitehouse63997772009-06-12 08:49:20 +010036#include "trace_gfs2.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000037
Steven Whitehouse2c1e52a2006-09-05 15:41:57 -040038#define BFITNOENT ((u32)~0)
Bob Peterson6760bdc2007-07-24 14:09:32 -050039#define NO_BLOCK ((u64)~0)
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040040
Bob Peterson1f466a42008-03-10 18:17:47 -050041#if BITS_PER_LONG == 32
42#define LBITMASK (0x55555555UL)
43#define LBITSKIP55 (0x55555555UL)
44#define LBITSKIP00 (0x00000000UL)
45#else
46#define LBITMASK (0x5555555555555555UL)
47#define LBITSKIP55 (0x5555555555555555UL)
48#define LBITSKIP00 (0x0000000000000000UL)
49#endif
50
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040051/*
52 * These routines are used by the resource group routines (rgrp.c)
53 * to keep track of block allocation. Each block is represented by two
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -040054 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
55 *
56 * 0 = Free
57 * 1 = Used (not metadata)
58 * 2 = Unlinked (still in use) inode
59 * 3 = Used (metadata)
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040060 */
61
Bob Peterson5ce13432013-11-06 10:55:52 -050062struct gfs2_extent {
63 struct gfs2_rbm rbm;
64 u32 len;
65};
66
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040067static const char valid_change[16] = {
68 /* current */
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -040069 /* n */ 0, 1, 1, 1,
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040070 /* e */ 1, 0, 0, 0,
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -040071 /* w */ 0, 0, 0, 1,
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040072 1, 0, 0, 0
73};
74
Bob Peterson5ce13432013-11-06 10:55:52 -050075static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
Bob Peterson8381e602016-05-02 09:42:49 -050076 const struct gfs2_inode *ip, bool nowrap);
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +010077
78
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040079/**
80 * gfs2_setbit - Set a bit in the bitmaps
Steven Whitehouse3e6339d2012-08-13 11:37:51 +010081 * @rbm: The position of the bit to set
82 * @do_clone: Also set the clone bitmap, if it exists
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040083 * @new_state: the new state of the block
84 *
85 */
86
Steven Whitehouse3e6339d2012-08-13 11:37:51 +010087static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
Bob Peterson06344b92012-04-26 12:44:35 -040088 unsigned char new_state)
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040089{
Steven Whitehouseb45e41d2008-02-06 10:11:15 +000090 unsigned char *byte1, *byte2, *end, cur_state;
Bob Petersone579ed42013-09-17 13:12:15 -040091 struct gfs2_bitmap *bi = rbm_bi(rbm);
92 unsigned int buflen = bi->bi_len;
Steven Whitehouse3e6339d2012-08-13 11:37:51 +010093 const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040094
Bob Petersone579ed42013-09-17 13:12:15 -040095 byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
96 end = bi->bi_bh->b_data + bi->bi_offset + buflen;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040097
Steven Whitehouseb45e41d2008-02-06 10:11:15 +000098 BUG_ON(byte1 >= end);
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040099
Steven Whitehouseb45e41d2008-02-06 10:11:15 +0000100 cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400101
Steven Whitehouseb45e41d2008-02-06 10:11:15 +0000102 if (unlikely(!valid_change[new_state * 4 + cur_state])) {
Joe Perchesd77d1b52014-03-06 12:10:45 -0800103 pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n",
104 rbm->offset, cur_state, new_state);
105 pr_warn("rgrp=0x%llx bi_start=0x%x\n",
106 (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
107 pr_warn("bi_offset=0x%x bi_len=0x%x\n",
108 bi->bi_offset, bi->bi_len);
Bob Peterson95c8e172011-03-22 10:49:12 -0400109 dump_stack();
Steven Whitehouse3e6339d2012-08-13 11:37:51 +0100110 gfs2_consist_rgrpd(rbm->rgd);
Steven Whitehouseb45e41d2008-02-06 10:11:15 +0000111 return;
112 }
113 *byte1 ^= (cur_state ^ new_state) << bit;
114
Bob Petersone579ed42013-09-17 13:12:15 -0400115 if (do_clone && bi->bi_clone) {
116 byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
Steven Whitehouseb45e41d2008-02-06 10:11:15 +0000117 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
118 *byte2 ^= (cur_state ^ new_state) << bit;
119 }
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400120}
121
122/**
123 * gfs2_testbit - test a bit in the bitmaps
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100124 * @rbm: The bit to test
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400125 *
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100126 * Returns: The two bit block state of the requested bit
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400127 */
128
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100129static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400130{
Bob Petersone579ed42013-09-17 13:12:15 -0400131 struct gfs2_bitmap *bi = rbm_bi(rbm);
132 const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100133 const u8 *byte;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400134 unsigned int bit;
135
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100136 byte = buffer + (rbm->offset / GFS2_NBBY);
137 bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400138
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100139 return (*byte >> bit) & GFS2_BIT_MASK;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400140}
141
142/**
Steven Whitehouse223b2b82009-02-17 14:13:35 +0000143 * gfs2_bit_search
144 * @ptr: Pointer to bitmap data
145 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
146 * @state: The state we are searching for
147 *
148 * We xor the bitmap data with a patter which is the bitwise opposite
149 * of what we are looking for, this gives rise to a pattern of ones
150 * wherever there is a match. Since we have two bits per entry, we
151 * take this pattern, shift it down by one place and then and it with
152 * the original. All the even bit positions (0,2,4, etc) then represent
153 * successful matches, so we mask with 0x55555..... to remove the unwanted
154 * odd bit positions.
155 *
156 * This allows searching of a whole u64 at once (32 blocks) with a
157 * single test (on 64 bit arches).
158 */
159
160static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
161{
162 u64 tmp;
163 static const u64 search[] = {
Hannes Eder075ac442009-02-21 02:11:42 +0100164 [0] = 0xffffffffffffffffULL,
165 [1] = 0xaaaaaaaaaaaaaaaaULL,
166 [2] = 0x5555555555555555ULL,
167 [3] = 0x0000000000000000ULL,
Steven Whitehouse223b2b82009-02-17 14:13:35 +0000168 };
169 tmp = le64_to_cpu(*ptr) ^ search[state];
170 tmp &= (tmp >> 1);
171 tmp &= mask;
172 return tmp;
173}
174
175/**
Bob Peterson8e2e0042012-07-19 08:12:40 -0400176 * rs_cmp - multi-block reservation range compare
177 * @blk: absolute file system block number of the new reservation
178 * @len: number of blocks in the new reservation
179 * @rs: existing reservation to compare against
180 *
181 * returns: 1 if the block range is beyond the reach of the reservation
182 * -1 if the block range is before the start of the reservation
183 * 0 if the block range overlaps with the reservation
184 */
185static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
186{
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100187 u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
Bob Peterson8e2e0042012-07-19 08:12:40 -0400188
189 if (blk >= startblk + rs->rs_free)
190 return 1;
191 if (blk + len - 1 < startblk)
192 return -1;
193 return 0;
194}
195
196/**
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400197 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
198 * a block in a given allocation state.
Bob Peterson886b1412012-04-11 13:03:52 -0400199 * @buf: the buffer that holds the bitmaps
Steven Whitehouse223b2b82009-02-17 14:13:35 +0000200 * @len: the length (in bytes) of the buffer
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400201 * @goal: start search at this block's bit-pair (within @buffer)
Steven Whitehouse223b2b82009-02-17 14:13:35 +0000202 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400203 *
204 * Scope of @goal and returned block number is only within this bitmap buffer,
205 * not entire rgrp or filesystem. @buffer will be offset from the actual
Steven Whitehouse223b2b82009-02-17 14:13:35 +0000206 * beginning of a bitmap block buffer, skipping any header structures, but
207 * headers are always a multiple of 64 bits long so that the buffer is
208 * always aligned to a 64 bit boundary.
209 *
210 * The size of the buffer is in bytes, but is it assumed that it is
Anand Gadiyarfd589a82009-07-16 17:13:03 +0200211 * always ok to read a complete multiple of 64 bits at the end
Steven Whitehouse223b2b82009-02-17 14:13:35 +0000212 * of the block in case the end is no aligned to a natural boundary.
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400213 *
214 * Return: the block number (bitmap buffer scope) that was found
215 */
216
Hannes Eder02ab1722009-02-21 02:12:05 +0100217static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
218 u32 goal, u8 state)
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400219{
Steven Whitehouse223b2b82009-02-17 14:13:35 +0000220 u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
221 const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
222 const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
223 u64 tmp;
Hannes Eder075ac442009-02-21 02:11:42 +0100224 u64 mask = 0x5555555555555555ULL;
Steven Whitehouse223b2b82009-02-17 14:13:35 +0000225 u32 bit;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400226
Steven Whitehouse223b2b82009-02-17 14:13:35 +0000227 /* Mask off bits we don't care about at the start of the search */
228 mask <<= spoint;
229 tmp = gfs2_bit_search(ptr, mask, state);
230 ptr++;
231 while(tmp == 0 && ptr < end) {
Hannes Eder075ac442009-02-21 02:11:42 +0100232 tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
Steven Whitehouse223b2b82009-02-17 14:13:35 +0000233 ptr++;
Bob Peterson1f466a42008-03-10 18:17:47 -0500234 }
Steven Whitehouse223b2b82009-02-17 14:13:35 +0000235 /* Mask off any bits which are more than len bytes from the start */
236 if (ptr == end && (len & (sizeof(u64) - 1)))
237 tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
238 /* Didn't find anything, so return */
239 if (tmp == 0)
240 return BFITNOENT;
241 ptr--;
Steven Whitehoused8bd5042009-04-23 08:54:02 +0100242 bit = __ffs64(tmp);
Steven Whitehouse223b2b82009-02-17 14:13:35 +0000243 bit /= 2; /* two bits per entry in the bitmap */
244 return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400245}
246
247/**
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100248 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
249 * @rbm: The rbm with rgd already set correctly
250 * @block: The block number (filesystem relative)
251 *
252 * This sets the bi and offset members of an rbm based on a
253 * resource group and a filesystem relative block number. The
254 * resource group must be set in the rbm on entry, the bi and
255 * offset members will be set by this function.
256 *
257 * Returns: 0 on success, or an error code
258 */
259
260static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
261{
262 u64 rblock = block - rbm->rgd->rd_data0;
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100263
264 if (WARN_ON_ONCE(rblock > UINT_MAX))
265 return -EINVAL;
266 if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
267 return -E2BIG;
268
Bob Petersone579ed42013-09-17 13:12:15 -0400269 rbm->bii = 0;
Bob Petersona68a0a32012-10-19 08:32:51 -0400270 rbm->offset = (u32)(rblock);
271 /* Check if the block is within the first block */
Bob Petersone579ed42013-09-17 13:12:15 -0400272 if (rbm->offset < rbm_bi(rbm)->bi_blocks)
Bob Petersona68a0a32012-10-19 08:32:51 -0400273 return 0;
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100274
Bob Petersona68a0a32012-10-19 08:32:51 -0400275 /* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
276 rbm->offset += (sizeof(struct gfs2_rgrp) -
277 sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
Bob Petersone579ed42013-09-17 13:12:15 -0400278 rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
279 rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100280 return 0;
281}
282
283/**
Bob Peterson149ed7f2013-09-17 13:14:35 -0400284 * gfs2_rbm_incr - increment an rbm structure
285 * @rbm: The rbm with rgd already set correctly
286 *
287 * This function takes an existing rbm structure and increments it to the next
288 * viable block offset.
289 *
290 * Returns: If incrementing the offset would cause the rbm to go past the
291 * end of the rgrp, true is returned, otherwise false.
292 *
293 */
294
295static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
296{
297 if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
298 rbm->offset++;
299 return false;
300 }
301 if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
302 return true;
303
304 rbm->offset = 0;
305 rbm->bii++;
306 return false;
307}
308
309/**
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100310 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
311 * @rbm: Position to search (value/result)
312 * @n_unaligned: Number of unaligned blocks to check
313 * @len: Decremented for each block found (terminate on zero)
314 *
315 * Returns: true if a non-free block is encountered
316 */
317
318static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
319{
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100320 u32 n;
321 u8 res;
322
323 for (n = 0; n < n_unaligned; n++) {
324 res = gfs2_testbit(rbm);
325 if (res != GFS2_BLKST_FREE)
326 return true;
327 (*len)--;
328 if (*len == 0)
329 return true;
Bob Peterson149ed7f2013-09-17 13:14:35 -0400330 if (gfs2_rbm_incr(rbm))
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100331 return true;
332 }
333
334 return false;
335}
336
337/**
338 * gfs2_free_extlen - Return extent length of free blocks
Fabian Frederick27ff6a02014-07-02 22:05:27 +0200339 * @rrbm: Starting position
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100340 * @len: Max length to check
341 *
342 * Starting at the block specified by the rbm, see how many free blocks
343 * there are, not reading more than len blocks ahead. This can be done
344 * using memchr_inv when the blocks are byte aligned, but has to be done
345 * on a block by block basis in case of unaligned blocks. Also this
346 * function can cope with bitmap boundaries (although it must stop on
347 * a resource group boundary)
348 *
349 * Returns: Number of free blocks in the extent
350 */
351
352static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
353{
354 struct gfs2_rbm rbm = *rrbm;
355 u32 n_unaligned = rbm.offset & 3;
356 u32 size = len;
357 u32 bytes;
358 u32 chunk_size;
359 u8 *ptr, *start, *end;
360 u64 block;
Bob Petersone579ed42013-09-17 13:12:15 -0400361 struct gfs2_bitmap *bi;
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100362
363 if (n_unaligned &&
364 gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
365 goto out;
366
Bob Peterson37015302012-09-12 09:40:31 -0400367 n_unaligned = len & 3;
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100368 /* Start is now byte aligned */
369 while (len > 3) {
Bob Petersone579ed42013-09-17 13:12:15 -0400370 bi = rbm_bi(&rbm);
371 start = bi->bi_bh->b_data;
372 if (bi->bi_clone)
373 start = bi->bi_clone;
374 end = start + bi->bi_bh->b_size;
375 start += bi->bi_offset;
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100376 BUG_ON(rbm.offset & 3);
377 start += (rbm.offset / GFS2_NBBY);
378 bytes = min_t(u32, len / GFS2_NBBY, (end - start));
379 ptr = memchr_inv(start, 0, bytes);
380 chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
381 chunk_size *= GFS2_NBBY;
382 BUG_ON(len < chunk_size);
383 len -= chunk_size;
384 block = gfs2_rbm_to_block(&rbm);
Bob Peterson15bd50a2012-12-20 13:21:07 -0500385 if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
386 n_unaligned = 0;
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100387 break;
Bob Peterson15bd50a2012-12-20 13:21:07 -0500388 }
389 if (ptr) {
390 n_unaligned = 3;
391 break;
392 }
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100393 n_unaligned = len & 3;
394 }
395
396 /* Deal with any bits left over at the end */
397 if (n_unaligned)
398 gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
399out:
400 return size - len;
401}
402
403/**
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400404 * gfs2_bitcount - count the number of bits in a certain state
Bob Peterson886b1412012-04-11 13:03:52 -0400405 * @rgd: the resource group descriptor
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400406 * @buffer: the buffer that holds the bitmaps
407 * @buflen: the length (in bytes) of the buffer
408 * @state: the state of the block we're looking for
409 *
410 * Returns: The number of bits
411 */
412
Steven Whitehouse110acf32008-01-29 13:30:20 +0000413static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
414 unsigned int buflen, u8 state)
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400415{
Steven Whitehouse110acf32008-01-29 13:30:20 +0000416 const u8 *byte = buffer;
417 const u8 *end = buffer + buflen;
418 const u8 state1 = state << 2;
419 const u8 state2 = state << 4;
420 const u8 state3 = state << 6;
Steven Whitehousecd915492006-09-04 12:49:07 -0400421 u32 count = 0;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400422
423 for (; byte < end; byte++) {
424 if (((*byte) & 0x03) == state)
425 count++;
426 if (((*byte) & 0x0C) == state1)
427 count++;
428 if (((*byte) & 0x30) == state2)
429 count++;
430 if (((*byte) & 0xC0) == state3)
431 count++;
432 }
433
434 return count;
435}
436
David Teiglandb3b94fa2006-01-16 16:50:04 +0000437/**
438 * gfs2_rgrp_verify - Verify that a resource group is consistent
David Teiglandb3b94fa2006-01-16 16:50:04 +0000439 * @rgd: the rgrp
440 *
441 */
442
443void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
444{
445 struct gfs2_sbd *sdp = rgd->rd_sbd;
446 struct gfs2_bitmap *bi = NULL;
Steven Whitehousebb8d8a62007-06-01 14:11:58 +0100447 u32 length = rgd->rd_length;
Steven Whitehousecd915492006-09-04 12:49:07 -0400448 u32 count[4], tmp;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000449 int buf, x;
450
Steven Whitehousecd915492006-09-04 12:49:07 -0400451 memset(count, 0, 4 * sizeof(u32));
David Teiglandb3b94fa2006-01-16 16:50:04 +0000452
453 /* Count # blocks in each of 4 possible allocation states */
454 for (buf = 0; buf < length; buf++) {
455 bi = rgd->rd_bits + buf;
456 for (x = 0; x < 4; x++)
457 count[x] += gfs2_bitcount(rgd,
458 bi->bi_bh->b_data +
459 bi->bi_offset,
460 bi->bi_len, x);
461 }
462
Steven Whitehousecfc8b542008-11-04 10:25:13 +0000463 if (count[0] != rgd->rd_free) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000464 if (gfs2_consist_rgrpd(rgd))
465 fs_err(sdp, "free data mismatch: %u != %u\n",
Steven Whitehousecfc8b542008-11-04 10:25:13 +0000466 count[0], rgd->rd_free);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000467 return;
468 }
469
Steven Whitehouse73f74942008-11-04 10:32:57 +0000470 tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
Benjamin Marzinski6b946172009-07-10 18:13:26 -0500471 if (count[1] != tmp) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000472 if (gfs2_consist_rgrpd(rgd))
473 fs_err(sdp, "used data mismatch: %u != %u\n",
474 count[1], tmp);
475 return;
476 }
477
Benjamin Marzinski6b946172009-07-10 18:13:26 -0500478 if (count[2] + count[3] != rgd->rd_dinodes) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000479 if (gfs2_consist_rgrpd(rgd))
480 fs_err(sdp, "used metadata mismatch: %u != %u\n",
Benjamin Marzinski6b946172009-07-10 18:13:26 -0500481 count[2] + count[3], rgd->rd_dinodes);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000482 return;
483 }
484}
485
David Teiglandb3b94fa2006-01-16 16:50:04 +0000486/**
487 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
488 * @sdp: The GFS2 superblock
Bob Peterson886b1412012-04-11 13:03:52 -0400489 * @blk: The data block number
490 * @exact: True if this needs to be an exact match
David Teiglandb3b94fa2006-01-16 16:50:04 +0000491 *
492 * Returns: The resource group, or NULL if not found
493 */
494
Steven Whitehouse66fc0612012-02-08 12:58:32 +0000495struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000496{
Steven Whitehouse66fc0612012-02-08 12:58:32 +0000497 struct rb_node *n, *next;
Steven Whitehousef75bbfb2011-09-08 10:21:13 +0100498 struct gfs2_rgrpd *cur;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000499
500 spin_lock(&sdp->sd_rindex_spin);
Steven Whitehouse66fc0612012-02-08 12:58:32 +0000501 n = sdp->sd_rindex_tree.rb_node;
502 while (n) {
503 cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
504 next = NULL;
Bob Peterson7c9ca622011-08-31 09:53:19 +0100505 if (blk < cur->rd_addr)
Steven Whitehouse66fc0612012-02-08 12:58:32 +0000506 next = n->rb_left;
Steven Whitehousef75bbfb2011-09-08 10:21:13 +0100507 else if (blk >= cur->rd_data0 + cur->rd_data)
Steven Whitehouse66fc0612012-02-08 12:58:32 +0000508 next = n->rb_right;
509 if (next == NULL) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000510 spin_unlock(&sdp->sd_rindex_spin);
Steven Whitehouse66fc0612012-02-08 12:58:32 +0000511 if (exact) {
512 if (blk < cur->rd_addr)
513 return NULL;
514 if (blk >= cur->rd_data0 + cur->rd_data)
515 return NULL;
516 }
Bob Peterson7c9ca622011-08-31 09:53:19 +0100517 return cur;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000518 }
Steven Whitehouse66fc0612012-02-08 12:58:32 +0000519 n = next;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000520 }
David Teiglandb3b94fa2006-01-16 16:50:04 +0000521 spin_unlock(&sdp->sd_rindex_spin);
522
523 return NULL;
524}
525
526/**
527 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
528 * @sdp: The GFS2 superblock
529 *
530 * Returns: The first rgrp in the filesystem
531 */
532
533struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
534{
Bob Peterson7c9ca622011-08-31 09:53:19 +0100535 const struct rb_node *n;
536 struct gfs2_rgrpd *rgd;
537
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100538 spin_lock(&sdp->sd_rindex_spin);
Bob Peterson7c9ca622011-08-31 09:53:19 +0100539 n = rb_first(&sdp->sd_rindex_tree);
540 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100541 spin_unlock(&sdp->sd_rindex_spin);
Bob Peterson7c9ca622011-08-31 09:53:19 +0100542
543 return rgd;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000544}
545
546/**
547 * gfs2_rgrpd_get_next - get the next RG
Bob Peterson886b1412012-04-11 13:03:52 -0400548 * @rgd: the resource group descriptor
David Teiglandb3b94fa2006-01-16 16:50:04 +0000549 *
550 * Returns: The next rgrp
551 */
552
553struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
554{
Bob Peterson7c9ca622011-08-31 09:53:19 +0100555 struct gfs2_sbd *sdp = rgd->rd_sbd;
556 const struct rb_node *n;
557
558 spin_lock(&sdp->sd_rindex_spin);
559 n = rb_next(&rgd->rd_node);
560 if (n == NULL)
561 n = rb_first(&sdp->sd_rindex_tree);
562
563 if (unlikely(&rgd->rd_node == n)) {
564 spin_unlock(&sdp->sd_rindex_spin);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000565 return NULL;
Bob Peterson7c9ca622011-08-31 09:53:19 +0100566 }
567 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
568 spin_unlock(&sdp->sd_rindex_spin);
569 return rgd;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000570}
571
Abhi Das00a158b2014-09-18 21:40:28 -0500572void check_and_update_goal(struct gfs2_inode *ip)
573{
574 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
575 if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
576 ip->i_goal = ip->i_no_addr;
577}
578
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100579void gfs2_free_clones(struct gfs2_rgrpd *rgd)
580{
581 int x;
582
583 for (x = 0; x < rgd->rd_length; x++) {
584 struct gfs2_bitmap *bi = rgd->rd_bits + x;
585 kfree(bi->bi_clone);
586 bi->bi_clone = NULL;
587 }
588}
589
Bob Peterson0a305e42012-06-06 11:17:59 +0100590/**
Bob Petersonb54e9a02015-10-26 10:40:28 -0500591 * gfs2_rsqa_alloc - make sure we have a reservation assigned to the inode
592 * plus a quota allocations data structure, if necessary
Bob Peterson0a305e42012-06-06 11:17:59 +0100593 * @ip: the inode for this reservation
594 */
Bob Petersonb54e9a02015-10-26 10:40:28 -0500595int gfs2_rsqa_alloc(struct gfs2_inode *ip)
Bob Peterson0a305e42012-06-06 11:17:59 +0100596{
Bob Petersona097dc7e2015-07-16 08:28:04 -0500597 return gfs2_qa_alloc(ip);
Bob Peterson0a305e42012-06-06 11:17:59 +0100598}
599
Steven Whitehouse9e733d32012-08-23 15:37:59 +0100600static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
Bob Peterson8e2e0042012-07-19 08:12:40 -0400601{
Steven Whitehouse9e733d32012-08-23 15:37:59 +0100602 gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
603 (unsigned long long)rs->rs_inum,
604 (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100605 rs->rs_rbm.offset, rs->rs_free);
Bob Peterson8e2e0042012-07-19 08:12:40 -0400606}
607
Bob Peterson0a305e42012-06-06 11:17:59 +0100608/**
Bob Peterson8e2e0042012-07-19 08:12:40 -0400609 * __rs_deltree - remove a multi-block reservation from the rgd tree
610 * @rs: The reservation to remove
611 *
612 */
Bob Peterson20095212013-03-13 10:26:38 -0400613static void __rs_deltree(struct gfs2_blkreserv *rs)
Bob Peterson8e2e0042012-07-19 08:12:40 -0400614{
615 struct gfs2_rgrpd *rgd;
616
617 if (!gfs2_rs_active(rs))
618 return;
619
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100620 rgd = rs->rs_rbm.rgd;
Steven Whitehouse9e733d32012-08-23 15:37:59 +0100621 trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100622 rb_erase(&rs->rs_node, &rgd->rd_rstree);
Michel Lespinasse24d634e2012-08-05 22:04:08 -0700623 RB_CLEAR_NODE(&rs->rs_node);
Bob Peterson8e2e0042012-07-19 08:12:40 -0400624
625 if (rs->rs_free) {
Bob Petersone579ed42013-09-17 13:12:15 -0400626 struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
627
Bob Peterson20095212013-03-13 10:26:38 -0400628 /* return reserved blocks to the rgrp */
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100629 BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
630 rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
Bob Peterson5ea50502013-11-25 11:16:25 +0000631 /* The rgrp extent failure point is likely not to increase;
632 it will only do so if the freed blocks are somehow
633 contiguous with a span of free blocks that follows. Still,
634 it will force the number to be recalculated later. */
635 rgd->rd_extfail_pt += rs->rs_free;
Bob Peterson8e2e0042012-07-19 08:12:40 -0400636 rs->rs_free = 0;
Bob Petersone579ed42013-09-17 13:12:15 -0400637 clear_bit(GBF_FULL, &bi->bi_flags);
Bob Peterson8e2e0042012-07-19 08:12:40 -0400638 }
Bob Peterson8e2e0042012-07-19 08:12:40 -0400639}
640
641/**
642 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
643 * @rs: The reservation to remove
644 *
645 */
Bob Peterson20095212013-03-13 10:26:38 -0400646void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
Bob Peterson8e2e0042012-07-19 08:12:40 -0400647{
648 struct gfs2_rgrpd *rgd;
649
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100650 rgd = rs->rs_rbm.rgd;
651 if (rgd) {
652 spin_lock(&rgd->rd_rsspin);
Bob Peterson20095212013-03-13 10:26:38 -0400653 __rs_deltree(rs);
Bob Peterson44f52122016-07-06 10:36:43 -0500654 BUG_ON(rs->rs_free);
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100655 spin_unlock(&rgd->rd_rsspin);
656 }
Bob Peterson8e2e0042012-07-19 08:12:40 -0400657}
658
659/**
Bob Petersonb54e9a02015-10-26 10:40:28 -0500660 * gfs2_rsqa_delete - delete a multi-block reservation and quota allocation
Bob Peterson0a305e42012-06-06 11:17:59 +0100661 * @ip: The inode for this reservation
Steven Whitehouseaf5c2692013-09-27 12:49:33 +0100662 * @wcount: The inode's write count, or NULL
Bob Peterson0a305e42012-06-06 11:17:59 +0100663 *
664 */
Bob Petersonb54e9a02015-10-26 10:40:28 -0500665void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
Bob Peterson0a305e42012-06-06 11:17:59 +0100666{
667 down_write(&ip->i_rw_mutex);
Bob Peterson44f52122016-07-06 10:36:43 -0500668 if ((wcount == NULL) || (atomic_read(wcount) <= 1))
Bob Petersona097dc7e2015-07-16 08:28:04 -0500669 gfs2_rs_deltree(&ip->i_res);
Bob Peterson0a305e42012-06-06 11:17:59 +0100670 up_write(&ip->i_rw_mutex);
Bob Petersona097dc7e2015-07-16 08:28:04 -0500671 gfs2_qa_delete(ip, wcount);
Bob Peterson0a305e42012-06-06 11:17:59 +0100672}
673
Bob Peterson8e2e0042012-07-19 08:12:40 -0400674/**
675 * return_all_reservations - return all reserved blocks back to the rgrp.
676 * @rgd: the rgrp that needs its space back
677 *
678 * We previously reserved a bunch of blocks for allocation. Now we need to
679 * give them back. This leave the reservation structures in tact, but removes
680 * all of their corresponding "no-fly zones".
681 */
682static void return_all_reservations(struct gfs2_rgrpd *rgd)
683{
684 struct rb_node *n;
685 struct gfs2_blkreserv *rs;
686
687 spin_lock(&rgd->rd_rsspin);
688 while ((n = rb_first(&rgd->rd_rstree))) {
689 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
Bob Peterson20095212013-03-13 10:26:38 -0400690 __rs_deltree(rs);
Bob Peterson8e2e0042012-07-19 08:12:40 -0400691 }
692 spin_unlock(&rgd->rd_rsspin);
693}
694
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100695void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000696{
Bob Peterson7c9ca622011-08-31 09:53:19 +0100697 struct rb_node *n;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000698 struct gfs2_rgrpd *rgd;
699 struct gfs2_glock *gl;
700
Bob Peterson7c9ca622011-08-31 09:53:19 +0100701 while ((n = rb_first(&sdp->sd_rindex_tree))) {
702 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000703 gl = rgd->rd_gl;
704
Bob Peterson7c9ca622011-08-31 09:53:19 +0100705 rb_erase(n, &sdp->sd_rindex_tree);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000706
707 if (gl) {
Andreas Gruenbacher7023a0b2017-08-30 07:46:24 -0500708 glock_clear_object(gl, rgd);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000709 gfs2_glock_put(gl);
710 }
711
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100712 gfs2_free_clones(rgd);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000713 kfree(rgd->rd_bits);
Bob Peterson36e4ad02016-06-09 14:24:07 -0500714 rgd->rd_bits = NULL;
Bob Peterson8e2e0042012-07-19 08:12:40 -0400715 return_all_reservations(rgd);
Bob Peterson6bdd9be2008-01-28 17:20:26 -0600716 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000717 }
718}
719
/* Dump an rgrp's rindex fields to the kernel log; used by
 * compute_bitstructs() when a consistency error is found. */
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
	pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	pr_info("ri_length = %u\n", rgd->rd_length);
	pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	pr_info("ri_data = %u\n", rgd->rd_data);
	pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes);
}
728
/**
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap
 * data (rd_length blocks: the rgrp header block plus any extra bitmap
 * blocks).  Each bitmap byte describes GFS2_NBBY blocks.
 *
 * Returns: 0 on success, -EINVAL if rd_length is zero, -ENOMEM on
 * allocation failure, or -EIO if the rindex entry is inconsistent.
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_bitbytes;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		bi->bi_flags = 0;
		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* header block: bitmap bytes follow the rgrp header */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* last block: holds whatever bytes remain */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* other blocks: full block minus the metadata header */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		}

		bytes_left -= bytes;
	}

	/* All of rd_bitbytes must have been consumed exactly */
	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	/* The last descriptor must end exactly at rd_data blocks */
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(rgd);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}
809
810/**
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500811 * gfs2_ri_total - Total up the file system space, according to the rindex.
Bob Peterson886b1412012-04-11 13:03:52 -0400812 * @sdp: the filesystem
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500813 *
814 */
815u64 gfs2_ri_total(struct gfs2_sbd *sdp)
816{
817 u64 total_data = 0;
818 struct inode *inode = sdp->sd_rindex;
819 struct gfs2_inode *ip = GFS2_I(inode);
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500820 char buf[sizeof(struct gfs2_rindex)];
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500821 int error, rgrps;
822
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500823 for (rgrps = 0;; rgrps++) {
824 loff_t pos = rgrps * sizeof(struct gfs2_rindex);
825
Bob Petersonbcd72782010-12-07 13:58:56 -0500826 if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500827 break;
Andrew Price43066292012-04-16 16:40:55 +0100828 error = gfs2_internal_read(ip, buf, &pos,
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500829 sizeof(struct gfs2_rindex));
830 if (error != sizeof(struct gfs2_rindex))
831 break;
Steven Whitehousebb8d8a62007-06-01 14:11:58 +0100832 total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500833 }
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500834 return total_data;
835}
836
Bob Peterson6aad1c32012-03-05 09:20:59 -0500837static int rgd_insert(struct gfs2_rgrpd *rgd)
Bob Peterson7c9ca622011-08-31 09:53:19 +0100838{
839 struct gfs2_sbd *sdp = rgd->rd_sbd;
840 struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
841
842 /* Figure out where to put new node */
843 while (*newn) {
844 struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
845 rd_node);
846
847 parent = *newn;
848 if (rgd->rd_addr < cur->rd_addr)
849 newn = &((*newn)->rb_left);
850 else if (rgd->rd_addr > cur->rd_addr)
851 newn = &((*newn)->rb_right);
852 else
Bob Peterson6aad1c32012-03-05 09:20:59 -0500853 return -EEXIST;
Bob Peterson7c9ca622011-08-31 09:53:19 +0100854 }
855
856 rb_link_node(&rgd->rd_node, parent, newn);
857 rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
Bob Peterson6aad1c32012-03-05 09:20:59 -0500858 sdp->sd_rgrps++;
859 return 0;
Bob Peterson7c9ca622011-08-31 09:53:19 +0100860}
861
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500862/**
Robert Peterson6c532672007-05-10 16:54:38 -0500863 * read_rindex_entry - Pull in a new resource index entry from the disk
Andrew Price43066292012-04-16 16:40:55 +0100864 * @ip: Pointer to the rindex inode
David Teiglandb3b94fa2006-01-16 16:50:04 +0000865 *
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100866 * Returns: 0 on success, > 0 on EOF, error code otherwise
Robert Peterson6c532672007-05-10 16:54:38 -0500867 */
868
Andrew Price43066292012-04-16 16:40:55 +0100869static int read_rindex_entry(struct gfs2_inode *ip)
Robert Peterson6c532672007-05-10 16:54:38 -0500870{
871 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
Steven Whitehouse7005c3e2013-12-06 10:16:14 +0000872 const unsigned bsize = sdp->sd_sb.sb_bsize;
Robert Peterson6c532672007-05-10 16:54:38 -0500873 loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100874 struct gfs2_rindex buf;
Robert Peterson6c532672007-05-10 16:54:38 -0500875 int error;
876 struct gfs2_rgrpd *rgd;
877
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100878 if (pos >= i_size_read(&ip->i_inode))
879 return 1;
880
Andrew Price43066292012-04-16 16:40:55 +0100881 error = gfs2_internal_read(ip, (char *)&buf, &pos,
Robert Peterson6c532672007-05-10 16:54:38 -0500882 sizeof(struct gfs2_rindex));
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100883
884 if (error != sizeof(struct gfs2_rindex))
885 return (error == 0) ? 1 : error;
Robert Peterson6c532672007-05-10 16:54:38 -0500886
Bob Peterson6bdd9be2008-01-28 17:20:26 -0600887 rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
Robert Peterson6c532672007-05-10 16:54:38 -0500888 error = -ENOMEM;
889 if (!rgd)
890 return error;
891
Robert Peterson6c532672007-05-10 16:54:38 -0500892 rgd->rd_sbd = sdp;
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100893 rgd->rd_addr = be64_to_cpu(buf.ri_addr);
894 rgd->rd_length = be32_to_cpu(buf.ri_length);
895 rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
896 rgd->rd_data = be32_to_cpu(buf.ri_data);
897 rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
Bob Peterson8e2e0042012-07-19 08:12:40 -0400898 spin_lock_init(&rgd->rd_rsspin);
Bob Peterson7c9ca622011-08-31 09:53:19 +0100899
Robert Peterson6c532672007-05-10 16:54:38 -0500900 error = compute_bitstructs(rgd);
901 if (error)
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100902 goto fail;
Robert Peterson6c532672007-05-10 16:54:38 -0500903
Steven Whitehousebb8d8a62007-06-01 14:11:58 +0100904 error = gfs2_glock_get(sdp, rgd->rd_addr,
Robert Peterson6c532672007-05-10 16:54:38 -0500905 &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
906 if (error)
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100907 goto fail;
Robert Peterson6c532672007-05-10 16:54:38 -0500908
David Teigland4e2f8842012-11-14 13:47:37 -0500909 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
Bob Peterson0e27c182014-10-29 08:02:28 -0500910 rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
Bob Peterson7c9ca622011-08-31 09:53:19 +0100911 if (rgd->rd_data > sdp->sd_max_rg_data)
912 sdp->sd_max_rg_data = rgd->rd_data;
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100913 spin_lock(&sdp->sd_rindex_spin);
Bob Peterson6aad1c32012-03-05 09:20:59 -0500914 error = rgd_insert(rgd);
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100915 spin_unlock(&sdp->sd_rindex_spin);
Bob Peterson36e4ad02016-06-09 14:24:07 -0500916 if (!error) {
Andreas Gruenbacher6f6597ba2017-06-30 07:55:08 -0500917 glock_set_object(rgd->rd_gl, rgd);
Bob Peterson36e4ad02016-06-09 14:24:07 -0500918 rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
919 rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
920 rgd->rd_length) * bsize) - 1;
Bob Peterson6aad1c32012-03-05 09:20:59 -0500921 return 0;
Bob Peterson36e4ad02016-06-09 14:24:07 -0500922 }
Bob Peterson6aad1c32012-03-05 09:20:59 -0500923
924 error = 0; /* someone else read in the rgrp; free it and ignore it */
Bob Petersonc1ac5392012-03-22 08:58:30 -0400925 gfs2_glock_put(rgd->rd_gl);
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100926
927fail:
928 kfree(rgd->rd_bits);
Bob Peterson36e4ad02016-06-09 14:24:07 -0500929 rgd->rd_bits = NULL;
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100930 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
Robert Peterson6c532672007-05-10 16:54:38 -0500931 return error;
932}
933
934/**
Bob Peterson0e27c182014-10-29 08:02:28 -0500935 * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use
936 * @sdp: the GFS2 superblock
937 *
938 * The purpose of this function is to select a subset of the resource groups
939 * and mark them as PREFERRED. We do it in such a way that each node prefers
940 * to use a unique set of rgrps to minimize glock contention.
941 */
942static void set_rgrp_preferences(struct gfs2_sbd *sdp)
943{
944 struct gfs2_rgrpd *rgd, *first;
945 int i;
946
947 /* Skip an initial number of rgrps, based on this node's journal ID.
948 That should start each node out on its own set. */
949 rgd = gfs2_rgrpd_get_first(sdp);
950 for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
951 rgd = gfs2_rgrpd_get_next(rgd);
952 first = rgd;
953
954 do {
955 rgd->rd_flags |= GFS2_RDF_PREFERRED;
956 for (i = 0; i < sdp->sd_journals; i++) {
957 rgd = gfs2_rgrpd_get_next(rgd);
Abhi Das959b6712015-05-05 11:26:04 -0500958 if (!rgd || rgd == first)
Bob Peterson0e27c182014-10-29 08:02:28 -0500959 break;
960 }
Abhi Das959b6712015-05-05 11:26:04 -0500961 } while (rgd && rgd != first);
Bob Peterson0e27c182014-10-29 08:02:28 -0500962}
963
964/**
Robert Peterson6c532672007-05-10 16:54:38 -0500965 * gfs2_ri_update - Pull in a new resource index from the disk
966 * @ip: pointer to the rindex inode
967 *
David Teiglandb3b94fa2006-01-16 16:50:04 +0000968 * Returns: 0 on successful update, error code otherwise
969 */
970
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100971static int gfs2_ri_update(struct gfs2_inode *ip)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000972{
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -0400973 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000974 int error;
975
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100976 do {
Andrew Price43066292012-04-16 16:40:55 +0100977 error = read_rindex_entry(ip);
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100978 } while (error == 0);
979
980 if (error < 0)
981 return error;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000982
Bob Peterson0e27c182014-10-29 08:02:28 -0500983 set_rgrp_preferences(sdp);
984
Bob Petersoncf45b752008-01-31 10:31:39 -0600985 sdp->sd_rindex_uptodate = 1;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000986 return 0;
Robert Peterson6c532672007-05-10 16:54:38 -0500987}
David Teiglandb3b94fa2006-01-16 16:50:04 +0000988
Robert Peterson6c532672007-05-10 16:54:38 -0500989/**
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100990 * gfs2_rindex_update - Update the rindex if required
David Teiglandb3b94fa2006-01-16 16:50:04 +0000991 * @sdp: The GFS2 superblock
David Teiglandb3b94fa2006-01-16 16:50:04 +0000992 *
993 * We grab a lock on the rindex inode to make sure that it doesn't
994 * change whilst we are performing an operation. We keep this lock
995 * for quite long periods of time compared to other locks. This
996 * doesn't matter, since it is shared and it is very, very rarely
997 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
998 *
999 * This makes sure that we're using the latest copy of the resource index
1000 * special file, which might have been updated if someone expanded the
1001 * filesystem (via gfs2_grow utility), which adds new resource groups.
1002 *
Steven Whitehouse8339ee52011-08-31 16:38:29 +01001003 * Returns: 0 on succeess, error code otherwise
David Teiglandb3b94fa2006-01-16 16:50:04 +00001004 */
1005
Steven Whitehouse8339ee52011-08-31 16:38:29 +01001006int gfs2_rindex_update(struct gfs2_sbd *sdp)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001007{
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04001008 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001009 struct gfs2_glock *gl = ip->i_gl;
Steven Whitehouse8339ee52011-08-31 16:38:29 +01001010 struct gfs2_holder ri_gh;
1011 int error = 0;
Steven Whitehousea365fbf2012-02-24 15:09:14 +00001012 int unlock_required = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001013
1014 /* Read new copy from disk if we don't have the latest */
Bob Petersoncf45b752008-01-31 10:31:39 -06001015 if (!sdp->sd_rindex_uptodate) {
Steven Whitehousea365fbf2012-02-24 15:09:14 +00001016 if (!gfs2_glock_is_locked_by_me(gl)) {
1017 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
1018 if (error)
Bob Peterson6aad1c32012-03-05 09:20:59 -05001019 return error;
Steven Whitehousea365fbf2012-02-24 15:09:14 +00001020 unlock_required = 1;
1021 }
Steven Whitehouse8339ee52011-08-31 16:38:29 +01001022 if (!sdp->sd_rindex_uptodate)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001023 error = gfs2_ri_update(ip);
Steven Whitehousea365fbf2012-02-24 15:09:14 +00001024 if (unlock_required)
1025 gfs2_glock_dq_uninit(&ri_gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001026 }
1027
1028 return error;
1029}
1030
/* Populate the in-core rgrp fields from an on-disk (big-endian) rgrp header */
static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
{
	const struct gfs2_rgrp *str = buf;
	u32 rg_flags;

	rg_flags = be32_to_cpu(str->rg_flags);
	rg_flags &= ~GFS2_RDF_MASK;	/* on-disk flags live outside the in-core mask */
	rgd->rd_flags &= GFS2_RDF_MASK;	/* keep only the in-core-only flag bits */
	rgd->rd_flags |= rg_flags;
	rgd->rd_free = be32_to_cpu(str->rg_free);
	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
}
1044
/* Serialize the in-core rgrp fields into an on-disk (big-endian) rgrp header */
static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
	struct gfs2_rgrpd *next = gfs2_rgrpd_get_next(rgd);
	struct gfs2_rgrp *str = buf;

	str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
	str->rg_free = cpu_to_be32(rgd->rd_free);
	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
	/* rg_skip: block distance to the next rgrp, 0 for the last one.
	 * NOTE(review): left untouched when the next rgrp's address is not
	 * greater — presumably cannot happen with an address-ordered tree. */
	if (next == NULL)
		str->rg_skip = 0;
	else if (next->rd_addr > rgd->rd_addr)
		str->rg_skip = cpu_to_be32(next->rd_addr - rgd->rd_addr);
	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
}
1060
/* Return 1 if the glock's lock value block agrees with the on-disk rgrp
 * header (flags, free count, dinode count, igeneration — all compared in
 * their big-endian on-disk form), 0 otherwise. */
static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;

	if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
	    rgl->rl_dinodes != str->rg_dinodes ||
	    rgl->rl_igeneration != str->rg_igeneration)
		return 0;
	return 1;
}
1072
/* Refresh the lock value block from an on-disk rgrp header.  Both sides are
 * big-endian, so the fields are copied verbatim; only the magic is written
 * fresh, and the pad is cleared. */
static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
{
	const struct gfs2_rgrp *str = buf;

	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
	rgl->rl_flags = str->rg_flags;
	rgl->rl_free = str->rg_free;
	rgl->rl_dinodes = str->rg_dinodes;
	rgl->rl_igeneration = str->rg_igeneration;
	rgl->__pad = 0UL;
}
1084
/* Adjust the LVB's unlinked-inode counter by @change.  NOTE(review):
 * @change is u32, so decrements presumably arrive as two's-complement
 * values and rely on unsigned wrap-around — confirm against callers. */
static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
	rgl->rl_unlinked = cpu_to_be32(unlinked);
}
1091
1092static u32 count_unlinked(struct gfs2_rgrpd *rgd)
1093{
1094 struct gfs2_bitmap *bi;
1095 const u32 length = rgd->rd_length;
1096 const u8 *buffer = NULL;
1097 u32 i, goal, count = 0;
1098
1099 for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
1100 goal = 0;
1101 buffer = bi->bi_bh->b_data + bi->bi_offset;
1102 WARN_ON(!buffer_uptodate(bi->bi_bh));
1103 while (goal < bi->bi_len * GFS2_NBBY) {
1104 goal = gfs2_bitfit(buffer, bi->bi_len, goal,
1105 GFS2_BLKST_UNLINKED);
1106 if (goal == BFITNOENT)
1107 break;
1108 count++;
1109 goal++;
1110 }
1111 }
1112
1113 return count;
1114}
1115
1116
/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
 *
 * Returns: errno
 */

static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	/* Already read in: the first bitmap buffer is still attached */
	if (rgd->rd_bits[0].bi_bh != NULL)
		return 0;

	/* Submit reads for all bitmap blocks first ... */
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	/* ... then wait for them (in reverse order) and verify the metadata
	 * type: block 0 is the rgrp header (RG), the rest are bitmaps (RB). */
	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
					      GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		for (x = 0; x < length; x++)
			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
		rgd->rd_free_clone = rgd->rd_free;
		/* max out the rgrp allocation failure point */
		rgd->rd_extfail_pt = rgd->rd_free;
	}
	/* LVB not yet initialized (no magic): seed it from the header */
	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
				     rgd->rd_bits[0].bi_bh->b_data);
	}
	/* Otherwise, when LVBs are in use, cross-check LVB vs header */
	else if (sdp->sd_args.ar_rgrplvb) {
		if (!gfs2_rgrp_lvb_valid(rgd)){
			gfs2_consist_rgrpd(rgd);
			error = -EIO;
			goto fail;
		}
		if (rgd->rd_rgl->rl_unlinked == 0)
			rgd->rd_flags &= ~GFS2_RDF_CHECK;
	}
	return 0;

fail:
	/* Release every buffer obtained so far (x indexes past the last
	 * successful read; after the wait loop it equals length) */
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}

	return error;
}
1193
/* Fast-path rgrp refresh from the glock's lock value block, avoiding the
 * bitmap block reads when possible.  Falls back to gfs2_rgrp_bh_get() when
 * the LVB has not been initialized (magic missing).  Returns: errno */
static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
{
	u32 rl_flags;

	if (rgd->rd_flags & GFS2_RDF_UPTODATE)
		return 0;

	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
		return gfs2_rgrp_bh_get(rgd);

	/* Merge LVB flags with the in-core-only flag bits */
	rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
	rl_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
	if (rgd->rd_rgl->rl_unlinked == 0)
		rgd->rd_flags &= ~GFS2_RDF_CHECK;
	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
	return 0;
}
1216
/* Glock go_lock callback for rgrp glocks: read in the rgrp header and
 * bitmaps, unless the holder asked to skip it (GL_SKIP) and the mount
 * uses rgrp LVBs (ar_rgrplvb).  Returns: errno */
int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
		return 0;
	return gfs2_rgrp_bh_get(rgd);
}
1226
David Teiglandb3b94fa2006-01-16 16:50:04 +00001227/**
Bob Peterson39b0f1e2015-06-05 08:38:57 -05001228 * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
1229 * @rgd: The resource group
David Teiglandb3b94fa2006-01-16 16:50:04 +00001230 *
1231 */
1232
Bob Peterson39b0f1e2015-06-05 08:38:57 -05001233void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001234{
Steven Whitehousebb8d8a62007-06-01 14:11:58 +01001235 int x, length = rgd->rd_length;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001236
David Teiglandb3b94fa2006-01-16 16:50:04 +00001237 for (x = 0; x < length; x++) {
1238 struct gfs2_bitmap *bi = rgd->rd_bits + x;
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001239 if (bi->bi_bh) {
1240 brelse(bi->bi_bh);
1241 bi->bi_bh = NULL;
1242 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001243 }
1244
David Teiglandb3b94fa2006-01-16 16:50:04 +00001245}
1246
Bob Peterson39b0f1e2015-06-05 08:38:57 -05001247/**
1248 * gfs2_rgrp_go_unlock - Unlock a rgrp glock
1249 * @gh: The glock holder for the resource group
1250 *
1251 */
1252
1253void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
1254{
1255 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1256 int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
1257 test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);
1258
1259 if (rgd && demote_requested)
1260 gfs2_rgrp_brelse(rgd);
1261}
1262
/**
 * gfs2_rgrp_send_discards - issue discard requests for free block extents
 * @sdp: the filesystem
 * @offset: base block address the bitmap describes (e.g. rd_data0)
 * @bh: buffer holding the current on-disk bitmap, or NULL
 * @bi: the bitmap descriptor to scan
 * @minlen: only discard extents of at least this many blocks
 * @ptrimmed: if non-NULL, receives the number of blocks discarded
 *
 * Scans the bitmap (the clone when present, else the live buffer) for free
 * blocks and coalesces them into extents, issuing one discard request per
 * extent of at least @minlen blocks.  When @bh is given, only blocks whose
 * state differs between @bh and the clone are selected — presumably the
 * blocks freed since the clone was taken; verify against callers.
 *
 * Returns: 0 on success; -EIO if a discard request fails, in which case
 * discards are disabled for the filesystem.
 */
int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
			    struct buffer_head *bh,
			    const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 blk;
	sector_t start = 0;
	sector_t nr_blks = 0;
	int rv;
	unsigned int x;
	u32 trimmed = 0;
	u8 diff;

	for (x = 0; x < bi->bi_len; x++) {
		const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
		clone += bi->bi_offset;
		clone += x;
		if (bh) {
			const u8 *orig = bh->b_data + bi->bi_offset + x;
			diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
		} else {
			diff = ~(*clone | (*clone >> 1));
		}
		/* Each byte packs GFS2_NBBY two-bit block states; 0x55 keeps
		 * the low bit of each state */
		diff &= 0x55;
		if (diff == 0)
			continue;
		blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
		while(diff) {
			if (diff & 1) {
				if (nr_blks == 0)
					goto start_new_extent;
				/* Non-contiguous: flush the pending extent
				 * (if long enough) and start a new one */
				if ((start + nr_blks) != blk) {
					if (nr_blks >= minlen) {
						rv = sb_issue_discard(sb,
							start, nr_blks,
							GFP_NOFS, 0);
						if (rv)
							goto fail;
						trimmed += nr_blks;
					}
					nr_blks = 0;
start_new_extent:
					start = blk;
				}
				nr_blks++;
			}
			diff >>= 2;
			blk++;
		}
	}
	/* Flush the final pending extent, if any and long enough */
	if (nr_blks >= minlen) {
		rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
		if (rv)
			goto fail;
		trimmed += nr_blks;
	}
	if (ptrimmed)
		*ptrimmed = trimmed;
	return 0;

fail:
	if (sdp->sd_args.ar_discard)
		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
	sdp->sd_args.ar_discard = 0;
	return -EIO;
}
1329
1330/**
1331 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
1332 * @filp: Any file on the filesystem
1333 * @argp: Pointer to the arguments (also used to pass result)
1334 *
1335 * Returns: 0 on success, otherwise error code
1336 */
1337
1338int gfs2_fitrim(struct file *filp, void __user *argp)
1339{
Al Viro496ad9a2013-01-23 17:07:38 -05001340 struct inode *inode = file_inode(filp);
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001341 struct gfs2_sbd *sdp = GFS2_SB(inode);
1342 struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
1343 struct buffer_head *bh;
1344 struct gfs2_rgrpd *rgd;
1345 struct gfs2_rgrpd *rgd_end;
1346 struct gfs2_holder gh;
1347 struct fstrim_range r;
1348 int ret = 0;
1349 u64 amt;
1350 u64 trimmed = 0;
Lukas Czerner076f0fa2012-10-16 11:39:08 +02001351 u64 start, end, minlen;
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001352 unsigned int x;
Lukas Czerner076f0fa2012-10-16 11:39:08 +02001353 unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001354
1355 if (!capable(CAP_SYS_ADMIN))
1356 return -EPERM;
1357
1358 if (!blk_queue_discard(q))
1359 return -EOPNOTSUPP;
1360
Lukas Czerner3a238ad2012-10-16 11:39:07 +02001361 if (copy_from_user(&r, argp, sizeof(r)))
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001362 return -EFAULT;
1363
Bob Peterson5e2f7d62012-04-04 22:11:16 -04001364 ret = gfs2_rindex_update(sdp);
1365 if (ret)
1366 return ret;
1367
Lukas Czerner076f0fa2012-10-16 11:39:08 +02001368 start = r.start >> bs_shift;
1369 end = start + (r.len >> bs_shift);
1370 minlen = max_t(u64, r.minlen,
1371 q->limits.discard_granularity) >> bs_shift;
1372
Abhijith Das6a98c332013-06-19 17:03:29 -04001373 if (end <= start || minlen > sdp->sd_max_rg_data)
Lukas Czerner076f0fa2012-10-16 11:39:08 +02001374 return -EINVAL;
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001375
Abhijith Das6a98c332013-06-19 17:03:29 -04001376 rgd = gfs2_blk2rgrpd(sdp, start, 0);
1377 rgd_end = gfs2_blk2rgrpd(sdp, end, 0);
1378
1379 if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
1380 && (start > rgd_end->rd_data0 + rgd_end->rd_data))
1381 return -EINVAL; /* start is beyond the end of the fs */
1382
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001383 while (1) {
1384
1385 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
1386 if (ret)
1387 goto out;
1388
1389 if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
1390 /* Trim each bitmap in the rgrp */
1391 for (x = 0; x < rgd->rd_length; x++) {
1392 struct gfs2_bitmap *bi = rgd->rd_bits + x;
Lukas Czerner076f0fa2012-10-16 11:39:08 +02001393 ret = gfs2_rgrp_send_discards(sdp,
1394 rgd->rd_data0, NULL, bi, minlen,
1395 &amt);
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001396 if (ret) {
1397 gfs2_glock_dq_uninit(&gh);
1398 goto out;
1399 }
1400 trimmed += amt;
1401 }
1402
1403 /* Mark rgrp as having been trimmed */
1404 ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
1405 if (ret == 0) {
1406 bh = rgd->rd_bits[0].bi_bh;
1407 rgd->rd_flags |= GFS2_RGF_TRIMMED;
Steven Whitehouse350a9b02012-12-14 12:36:02 +00001408 gfs2_trans_add_meta(rgd->rd_gl, bh);
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001409 gfs2_rgrp_out(rgd, bh->b_data);
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001410 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001411 gfs2_trans_end(sdp);
1412 }
1413 }
1414 gfs2_glock_dq_uninit(&gh);
1415
1416 if (rgd == rgd_end)
1417 break;
1418
1419 rgd = gfs2_rgrpd_get_next(rgd);
1420 }
1421
1422out:
Abhijith Das6a98c332013-06-19 17:03:29 -04001423 r.len = trimmed << bs_shift;
Lukas Czerner3a238ad2012-10-16 11:39:07 +02001424 if (copy_to_user(argp, &r, sizeof(r)))
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001425 return -EFAULT;
1426
1427 return ret;
Steven Whitehousef15ab562009-02-09 09:25:01 +00001428}
1429
David Teiglandb3b94fa2006-01-16 16:50:04 +00001430/**
Bob Peterson8e2e0042012-07-19 08:12:40 -04001431 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
Bob Peterson8e2e0042012-07-19 08:12:40 -04001432 * @ip: the inode structure
Bob Peterson8e2e0042012-07-19 08:12:40 -04001433 *
Bob Peterson8e2e0042012-07-19 08:12:40 -04001434 */
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001435static void rs_insert(struct gfs2_inode *ip)
Bob Peterson8e2e0042012-07-19 08:12:40 -04001436{
1437 struct rb_node **newn, *parent = NULL;
1438 int rc;
Bob Petersona097dc7e2015-07-16 08:28:04 -05001439 struct gfs2_blkreserv *rs = &ip->i_res;
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01001440 struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001441 u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);
1442
1443 BUG_ON(gfs2_rs_active(rs));
Bob Peterson8e2e0042012-07-19 08:12:40 -04001444
1445 spin_lock(&rgd->rd_rsspin);
1446 newn = &rgd->rd_rstree.rb_node;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001447 while (*newn) {
1448 struct gfs2_blkreserv *cur =
1449 rb_entry(*newn, struct gfs2_blkreserv, rs_node);
1450
1451 parent = *newn;
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001452 rc = rs_cmp(fsblock, rs->rs_free, cur);
Bob Peterson8e2e0042012-07-19 08:12:40 -04001453 if (rc > 0)
1454 newn = &((*newn)->rb_right);
1455 else if (rc < 0)
1456 newn = &((*newn)->rb_left);
1457 else {
1458 spin_unlock(&rgd->rd_rsspin);
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001459 WARN_ON(1);
1460 return;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001461 }
1462 }
1463
Bob Peterson8e2e0042012-07-19 08:12:40 -04001464 rb_link_node(&rs->rs_node, parent, newn);
1465 rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
1466
Bob Peterson8e2e0042012-07-19 08:12:40 -04001467 /* Do our rgrp accounting for the reservation */
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001468 rgd->rd_reserved += rs->rs_free; /* blocks reserved */
Bob Peterson8e2e0042012-07-19 08:12:40 -04001469 spin_unlock(&rgd->rd_rsspin);
Steven Whitehouse9e733d32012-08-23 15:37:59 +01001470 trace_gfs2_rs(rs, TRACE_RS_INSERT);
Bob Peterson8e2e0042012-07-19 08:12:40 -04001471}
1472
/**
 * rg_mblk_search - find a group of multiple free blocks to form a reservation
 * @rgd: the resource group descriptor
 * @ip: pointer to the inode for which we're reserving blocks
 * @ap: the allocation parameters (ap->target is the requested block count)
 *
 * On success a new reservation is inserted into @rgd's reservation tree
 * via rs_insert(); on failure (no suitable extent, or the rgrp has no
 * uncommitted free space left) this is a no-op.
 */

static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
			   const struct gfs2_alloc_parms *ap)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	u64 goal;
	struct gfs2_blkreserv *rs = &ip->i_res;
	u32 extlen;
	/* Free blocks not yet claimed by any reservation in this rgrp */
	u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
	int ret;
	struct inode *inode = &ip->i_inode;

	if (S_ISDIR(inode->i_mode))
		extlen = 1;
	else {
		/* Size the reservation from the larger of the write-size
		   hint and this request, bounded by what is available */
		extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
		extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
	}
	/* Nothing to do if the rgrp is over-reserved or too short on space */
	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
		return;

	/* Find bitmap block that contains bits for goal block */
	if (rgrp_contains_block(rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rgd->rd_last_alloc + rgd->rd_data0;

	if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
		return;

	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true);
	if (ret == 0) {
		/* Found an extent: record it and insert the reservation */
		rs->rs_rbm = rbm;
		rs->rs_free = extlen;
		rs->rs_inum = ip->i_no_addr;
		rs_insert(ip);
	} else {
		/* Search from the last-alloc goal failed; reset the goal so
		   the next attempt scans from the start of the rgrp */
		if (goal == rgd->rd_last_alloc + rgd->rd_data0)
			rgd->rd_last_alloc = 0;
	}
}
1521
/**
 * gfs2_next_unreserved_block - Return next block that is not reserved
 * @rgd: The resource group
 * @block: The starting block
 * @length: The required length
 * @ip: Ignore any reservations for this inode
 *
 * If the block does not appear in any reservation, then return the
 * block number unchanged. If it does appear in the reservation, then
 * keep looking through the tree of reservations in order to find the
 * first block number which is not reserved.
 *
 * The whole lookup runs under @rgd's rd_rsspin so the tree cannot
 * change while we walk it.
 */

static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
				      u32 length,
				      const struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs;
	struct rb_node *n;
	int rc;

	spin_lock(&rgd->rd_rsspin);
	/* Binary search for a reservation overlapping [block, block+length) */
	n = rgd->rd_rstree.rb_node;
	while (n) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		rc = rs_cmp(block, length, rs);
		if (rc < 0)
			n = n->rb_left;
		else if (rc > 0)
			n = n->rb_right;
		else
			break;
	}

	if (n) {
		/* Skip past each consecutive overlapping reservation, except
		 * our own (@ip's), advancing @block to just past its end.
		 * Reservations are non-overlapping, so in-order successors
		 * are found down the right subtree from here. */
		while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) {
			block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
			n = n->rb_right;
			if (n == NULL)
				break;
			rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		}
	}

	spin_unlock(&rgd->rd_rsspin);
	return block;
}
1569
/**
 * gfs2_reservation_check_and_update - Check for reservations during block alloc
 * @rbm: The current position in the resource group
 * @ip: The inode for which we are searching for blocks
 * @minext: The minimum extent length
 * @maxext: A pointer to the maximum extent structure
 *
 * This checks the current position in the rgrp to see whether there is
 * a reservation covering this block. If not then this function is a
 * no-op. If there is, then the position is moved to the end of the
 * contiguous reservation(s) so that we are pointing at the first
 * non-reserved block.
 *
 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
 */

static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
					     const struct gfs2_inode *ip,
					     u32 minext,
					     struct gfs2_extent *maxext)
{
	u64 block = gfs2_rbm_to_block(rbm);
	u32 extlen = 1;
	u64 nblock;
	int ret;

	/*
	 * If we have a minimum extent length, then skip over any extent
	 * which is less than the min extent length in size.
	 */
	if (minext) {
		extlen = gfs2_free_extlen(rbm, minext);
		/* No bigger than the best short extent seen so far: skip it
		   (jumps into the if-body below to advance past it) */
		if (extlen <= maxext->len)
			goto fail;
	}

	/*
	 * Check the extent which has been found against the reservations
	 * and skip if parts of it are already reserved
	 */
	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
	if (nblock == block) {
		/* Extent is unreserved; accept it if it is long enough */
		if (!minext || extlen >= minext)
			return 0;

		/* Too short, but remember the best candidate so the caller
		   can fall back to it if nothing bigger turns up */
		if (extlen > maxext->len) {
			maxext->len = extlen;
			maxext->rbm = *rbm;
		}
fail:
		nblock = block + extlen;
	}
	/* Reposition @rbm at the first block past the rejected span */
	ret = gfs2_rbm_from_block(rbm, nblock);
	if (ret < 0)
		return ret;
	return 1;
}
1627
/**
 * gfs2_rbm_find - Look for blocks of a particular state
 * @rbm: Value/result starting position and final position
 * @state: The state which we want to find
 * @minext: Pointer to the requested extent length (NULL for a single block)
 *          This is updated to be the actual reservation size.
 * @ip: If set, check for reservations
 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
 *          around until we've reached the starting point.
 *
 * Side effects:
 * - If looking for free blocks, we set GBF_FULL on each bitmap which
 *   has no free blocks in it.
 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
 *   has come up short on a free block search.
 *
 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
 */

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
			 const struct gfs2_inode *ip, bool nowrap)
{
	struct buffer_head *bh;
	int initial_bii;
	u32 initial_offset;
	/* Remember where the search began, for the rd_extfail_pt test below */
	int first_bii = rbm->bii;
	u32 first_offset = rbm->offset;
	u32 offset;
	u8 *buffer;
	int n = 0;				/* bitmaps examined so far */
	int iters = rbm->rgd->rd_length;	/* bitmaps to examine in total */
	int ret;
	struct gfs2_bitmap *bi;
	/* Best too-short free extent seen, used as a fallback on exit */
	struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };

	/* If we are not starting at the beginning of a bitmap, then we
	 * need to add one to the bitmap count to ensure that we search
	 * the starting bitmap twice.
	 */
	if (rbm->offset != 0)
		iters++;

	while(1) {
		bi = rbm_bi(rbm);
		/* GBF_FULL is a cached "no free blocks here" hint */
		if (test_bit(GBF_FULL, &bi->bi_flags) &&
		    (state == GFS2_BLKST_FREE))
			goto next_bitmap;

		bh = bi->bi_bh;
		buffer = bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bh));
		/* Use the clone bitmap (pending-release state) except when
		   scanning for unlinked inodes */
		if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
			buffer = bi->bi_clone + bi->bi_offset;
		initial_offset = rbm->offset;
		offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
		if (offset == BFITNOENT)
			goto bitmap_full;
		rbm->offset = offset;
		/* No inode means no reservation checking required */
		if (ip == NULL)
			return 0;

		initial_bii = rbm->bii;
		ret = gfs2_reservation_check_and_update(rbm, ip,
							minext ? *minext : 0,
							&maxext);
		if (ret == 0)
			return 0;
		if (ret > 0) {
			/* Position moved past a reservation; account for any
			   bitmaps we jumped over */
			n += (rbm->bii - initial_bii);
			goto next_iter;
		}
		if (ret == -E2BIG) {
			/* Reservation ran past the end of the rgrp: restart
			   from bitmap 0 (or stop, if nowrap) */
			rbm->bii = 0;
			rbm->offset = 0;
			n += (rbm->bii - initial_bii);
			goto res_covered_end_of_rgrp;
		}
		return ret;

bitmap_full:	/* Mark bitmap as full and fall through */
		/* Only valid if we scanned the bitmap from its start */
		if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
			set_bit(GBF_FULL, &bi->bi_flags);

next_bitmap:	/* Find next bitmap in the rgrp */
		rbm->offset = 0;
		rbm->bii++;
		if (rbm->bii == rbm->rgd->rd_length)
			rbm->bii = 0;
res_covered_end_of_rgrp:
		if ((rbm->bii == 0) && nowrap)
			break;
		n++;
next_iter:
		if (n >= iters)
			break;
	}

	if (minext == NULL || state != GFS2_BLKST_FREE)
		return -ENOSPC;

	/* If the extent was too small, and it's smaller than the smallest
	   to have failed before, remember for future reference that it's
	   useless to search this rgrp again for this amount or more. */
	if ((first_offset == 0) && (first_bii == 0) &&
	    (*minext < rbm->rgd->rd_extfail_pt))
		rbm->rgd->rd_extfail_pt = *minext;

	/* If the maximum extent we found is big enough to fulfill the
	   minimum requirements, use it anyway. */
	if (maxext.len) {
		*rbm = maxext.rbm;
		*minext = maxext.len;
		return 0;
	}

	return -ENOSPC;
}
1745
/**
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @rgd: The rgrp
 * @last_unlinked: block address of the last dinode we unlinked
 * @skip: block address we should explicitly not unlink
 *
 * Scans @rgd's bitmaps for blocks in the UNLINKED state and queues
 * delete work for each, clearing GFS2_RDF_CHECK when the whole rgrp
 * has been scanned.  (Note: returns nothing; any per-block errors are
 * skipped over.)
 */

static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
{
	u64 block;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;
	int found = 0;
	struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };

	while (1) {
		/* The bitmap scan is done under the log flush lock so the
		   bitmaps cannot change under us mid-search */
		down_write(&sdp->sd_log_flush_lock);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
				      true);
		up_write(&sdp->sd_log_flush_lock);
		if (error == -ENOSPC)
			break;
		if (WARN_ON_ONCE(error))
			break;

		block = gfs2_rbm_to_block(&rbm);
		/* Advance the search position; stop at the end of the rgrp */
		if (gfs2_rbm_from_block(&rbm, block + 1))
			break;
		/* Don't revisit blocks at or before the last one handled */
		if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
			continue;
		if (block == skip)
			continue;
		*last_unlinked = block;

		error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
		if (error)
			continue;

		/* If the inode is already in cache, we can ignore it here
		 * because the existing inode disposal code will deal with
		 * it when all refs have gone away. Accessing gl_object like
		 * this is not safe in general. Here it is ok because we do
		 * not dereference the pointer, and we only need an approx
		 * answer to whether it is NULL or not.
		 */
		ip = gl->gl_object;

		if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put(gl);
		else
			found++;

		/* Limit reclaim to sensible number of tasks */
		if (found > NR_CPUS)
			return;
	}

	/* Whole rgrp scanned: no need to re-check until the flag is reset */
	rgd->rd_flags &= ~GFS2_RDF_CHECK;
	return;
}
1811
/**
 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
 * @rgd: The rgrp in question
 * @loops: An indication of how picky we can be (0=very, 1=less so)
 *
 * This function uses the recently added glock statistics in order to
 * figure out whether a particular resource group is suffering from
 * contention from multiple nodes. This is done purely on the basis
 * of timings, since this is the only data we have to work with and
 * our aim here is to reject a resource group which is highly contended
 * but (very important) not to do this too often in order to ensure that
 * we do not land up introducing fragmentation by changing resource
 * groups when not actually required.
 *
 * The calculation is fairly simple, we want to know whether the SRTTB
 * (i.e. smoothed round trip time for blocking operations) to acquire
 * the lock for this rgrp's glock is significantly greater than the
 * time taken for resource groups on average. We introduce a margin in
 * the form of the variable @var which is computed as the sum of the two
 * respective variances, and multiplied by a factor depending on @loops
 * and whether we have a lot of data to base the decision on. This is
 * then tested against the square difference of the means in order to
 * decide whether the result is statistically significant or not.
 *
 * Returns: A boolean verdict on the congestion status
 */

static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
{
	const struct gfs2_glock *gl = rgd->rd_gl;
	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_lkstats *st;
	u64 r_dcount, l_dcount;		/* rgrp-wide / this-lock dlm counts */
	u64 l_srttb, a_srttb = 0;	/* this-lock / average SRTTB */
	s64 srttb_diff;
	u64 sqr_diff;
	u64 var;
	int cpu, nonzero = 0;

	/* preempt_disable protects the per-cpu stats access below */
	preempt_disable();
	/* Average the per-cpu rgrp SRTTB over CPUs with a nonzero sample */
	for_each_present_cpu(cpu) {
		st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
		if (st->stats[GFS2_LKS_SRTTB]) {
			a_srttb += st->stats[GFS2_LKS_SRTTB];
			nonzero++;
		}
	}
	st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
	if (nonzero)
		do_div(a_srttb, nonzero);
	r_dcount = st->stats[GFS2_LKS_DCOUNT];
	/* Margin = sum of the two blocking-RTT variances */
	var = st->stats[GFS2_LKS_SRTTVARB] +
	      gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
	preempt_enable();

	l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
	l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];

	/* Not enough data to make a statistically meaningful call */
	if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
		return false;

	srttb_diff = a_srttb - l_srttb;
	sqr_diff = srttb_diff * srttb_diff;

	/* Widen the margin when the sample is small or we're less picky */
	var *= 2;
	if (l_dcount < 8 || r_dcount < 8)
		var *= 2;
	if (loops == 1)
		var *= 2;

	/* Congested iff this lock is slower than average by a significant
	   (squared-difference > margin) amount */
	return ((srttb_diff < 0) && (sqr_diff > var));
}
1884
1885/**
1886 * gfs2_rgrp_used_recently
1887 * @rs: The block reservation with the rgrp to test
1888 * @msecs: The time limit in milliseconds
1889 *
1890 * Returns: True if the rgrp glock has been used within the time limit
1891 */
1892static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
1893 u64 msecs)
1894{
1895 u64 tdiff;
1896
1897 tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
1898 rs->rs_rbm.rgd->rd_gl->gl_dstamp));
1899
1900 return tdiff > (msecs * 1000 * 1000);
1901}
1902
Steven Whitehouse9dbe9612012-10-31 10:37:10 +00001903static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
1904{
1905 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1906 u32 skip;
1907
1908 get_random_bytes(&skip, sizeof(skip));
1909 return skip % sdp->sd_rgrps;
1910}
1911
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001912static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
1913{
1914 struct gfs2_rgrpd *rgd = *pos;
Steven Whitehouseaa8920c2012-11-13 14:50:35 +00001915 struct gfs2_sbd *sdp = rgd->rd_sbd;
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001916
1917 rgd = gfs2_rgrpd_get_next(rgd);
1918 if (rgd == NULL)
Steven Whitehouseaa8920c2012-11-13 14:50:35 +00001919 rgd = gfs2_rgrpd_get_first(sdp);
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001920 *pos = rgd;
1921 if (rgd != begin) /* If we didn't wrap */
1922 return true;
1923 return false;
1924}
1925
Steven Whitehousec8cdf472007-06-08 10:05:33 +01001926/**
Bob Peterson0e27c182014-10-29 08:02:28 -05001927 * fast_to_acquire - determine if a resource group will be fast to acquire
1928 *
1929 * If this is one of our preferred rgrps, it should be quicker to acquire,
1930 * because we tried to set ourselves up as dlm lock master.
1931 */
1932static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
1933{
1934 struct gfs2_glock *gl = rgd->rd_gl;
1935
1936 if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
1937 !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
1938 !test_bit(GLF_DEMOTE, &gl->gl_flags))
1939 return 1;
1940 if (rgd->rd_flags & GFS2_RDF_PREFERRED)
1941 return 1;
1942 return 0;
1943}
1944
/**
 * gfs2_inplace_reserve - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @ap: the allocation parameters
 *
 * We try our best to find an rgrp that has at least ap->target blocks
 * available. After a couple of passes (loops == 2), the prospects of finding
 * such an rgrp diminish. At this stage, we return the first rgrp that has
 * at least ap->min_target blocks available. Either way, we set ap->allowed to
 * the number of blocks available in the chosen rgrp.
 *
 * Returns: 0 on success,
 *          -ENOSPC if a suitable rgrp can't be found
 *          errno otherwise
 */

int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *begin = NULL;
	struct gfs2_blkreserv *rs = &ip->i_res;
	int error = 0, rg_locked, flags = 0;
	u64 last_unlinked = NO_BLOCK;
	int loops = 0;
	u32 skip = 0;	/* Orlov: number of rgrps still to skip over */

	if (sdp->sd_args.ar_rgrplvb)
		flags |= GL_SKIP;
	/* A zero-block request is a caller bug */
	if (gfs2_assert_warn(sdp, ap->target))
		return -EINVAL;
	/* Choose the starting rgrp: active reservation, cached rgrp
	   containing the goal block, or look it up from the goal */
	if (gfs2_rs_active(rs)) {
		begin = rs->rs_rbm.rgd;
	} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
		rs->rs_rbm.rgd = begin = ip->i_rgd;
	} else {
		check_and_update_goal(ip);
		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
	}
	if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
		skip = gfs2_orlov_skip(ip);
	if (rs->rs_rbm.rgd == NULL)
		return -EBADSLT;

	while (loops < 3) {
		rg_locked = 1;

		if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
			rg_locked = 0;
			if (skip && skip--)
				goto next_rgrp;
			/* Heuristics to avoid slow/contended glocks; only
			   applied when we don't already hold a reservation */
			if (!gfs2_rs_active(rs)) {
				if (loops == 0 &&
				    !fast_to_acquire(rs->rs_rbm.rgd))
					goto next_rgrp;
				if ((loops < 2) &&
				    gfs2_rgrp_used_recently(rs, 1000) &&
				    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
					goto next_rgrp;
			}
			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
						   LM_ST_EXCLUSIVE, flags,
						   &rs->rs_rgd_gh);
			if (unlikely(error))
				return error;
			/* Re-check congestion now that we hold the glock */
			if (!gfs2_rs_active(rs) && (loops < 2) &&
			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
				goto skip_rgrp;
			if (sdp->sd_args.ar_rgrplvb) {
				error = update_rgrp_lvb(rs->rs_rbm.rgd);
				if (unlikely(error)) {
					gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
					return error;
				}
			}
		}

		/* Skip unusable resource groups (no-alloc/error flags, or a
		   recorded extent-failure point smaller than this request) */
		if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
						 GFS2_RDF_ERROR)) ||
		    (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
			goto skip_rgrp;

		if (sdp->sd_args.ar_rgrplvb)
			gfs2_rgrp_bh_get(rs->rs_rbm.rgd);

		/* Get a reservation if we don't already have one */
		if (!gfs2_rs_active(rs))
			rg_mblk_search(rs->rs_rbm.rgd, ip, ap);

		/* Skip rgrps when we can't get a reservation on first pass */
		if (!gfs2_rs_active(rs) && (loops < 1))
			goto check_rgrp;

		/* If rgrp has enough free space, use it */
		if (rs->rs_rbm.rgd->rd_free_clone >= ap->target ||
		    (loops == 2 && ap->min_target &&
		     rs->rs_rbm.rgd->rd_free_clone >= ap->min_target)) {
			ip->i_rgd = rs->rs_rbm.rgd;
			ap->allowed = ip->i_rgd->rd_free_clone;
			return 0;
		}
check_rgrp:
		/* Check for unlinked inodes which can be reclaimed */
		if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
			try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
					ip->i_no_addr);
skip_rgrp:
		/* Drop reservation, if we couldn't use reserved rgrp */
		if (gfs2_rs_active(rs))
			gfs2_rs_deltree(rs);

		/* Unlock rgrp if required */
		if (!rg_locked)
			gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
next_rgrp:
		/* Find the next rgrp, and continue looking */
		if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
			continue;
		if (skip)
			continue;

		/* If we've scanned all the rgrps, but found no free blocks
		 * then this checks for some less likely conditions before
		 * trying again.
		 */
		loops++;
		/* Check that fs hasn't grown if writing to rindex */
		if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
			error = gfs2_ri_update(ip);
			if (error)
				return error;
		}
		/* Flushing the log may release space */
		if (loops == 2)
			gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
	}

	return -ENOSPC;
}
2084
2085/**
2086 * gfs2_inplace_release - release an inplace reservation
2087 * @ip: the inode the reservation was taken out on
2088 *
2089 * Release a reservation made by gfs2_inplace_reserve().
2090 */
2091
2092void gfs2_inplace_release(struct gfs2_inode *ip)
2093{
Bob Petersona097dc7e2015-07-16 08:28:04 -05002094 struct gfs2_blkreserv *rs = &ip->i_res;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002095
Andreas Gruenbacher6df9f9a2016-06-17 07:31:27 -05002096 if (gfs2_holder_initialized(&rs->rs_rgd_gh))
Bob Peterson564e12b2011-11-21 13:36:17 -05002097 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002098}
2099
2100/**
2101 * gfs2_get_block_type - Check a block in a RG is of given type
2102 * @rgd: the resource group holding the block
2103 * @block: the block number
2104 *
2105 * Returns: The block type (GFS2_BLKST_*)
2106 */
2107
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002108static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002109{
Steven Whitehouse39839032012-08-03 11:10:30 +01002110 struct gfs2_rbm rbm = { .rgd = rgd, };
2111 int ret;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002112
Steven Whitehouse39839032012-08-03 11:10:30 +01002113 ret = gfs2_rbm_from_block(&rbm, block);
2114 WARN_ON_ONCE(ret != 0);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002115
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002116 return gfs2_testbit(&rbm);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002117}
2118
David Teiglandb3b94fa2006-01-16 16:50:04 +00002119
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002120/**
2121 * gfs2_alloc_extent - allocate an extent from a given bitmap
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002122 * @rbm: the resource group information
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002123 * @dinode: TRUE if the first block we allocate is for a dinode
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002124 * @n: The extent length (value/result)
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002125 *
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002126 * Add the bitmap buffer to the transaction.
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002127 * Set the found bits to @new_state to change block's allocation state.
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002128 */
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002129static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002130 unsigned int *n)
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002131{
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002132 struct gfs2_rbm pos = { .rgd = rbm->rgd, };
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002133 const unsigned int elen = *n;
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002134 u64 block;
2135 int ret;
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002136
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002137 *n = 1;
2138 block = gfs2_rbm_to_block(rbm);
Bob Petersone579ed42013-09-17 13:12:15 -04002139 gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
Steven Whitehouse3e6339d2012-08-13 11:37:51 +01002140 gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002141 block++;
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002142 while (*n < elen) {
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002143 ret = gfs2_rbm_from_block(&pos, block);
Bob Peterson0688a5e2012-08-28 08:45:56 -04002144 if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002145 break;
Bob Petersone579ed42013-09-17 13:12:15 -04002146 gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
Steven Whitehouse3e6339d2012-08-13 11:37:51 +01002147 gfs2_setbit(&pos, true, GFS2_BLKST_USED);
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002148 (*n)++;
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002149 block++;
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002150 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00002151}
2152
/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns: Resource group containing the block(s), or NULL if @bstart
 *          does not map to any known resource group (fs inconsistency)
 */

static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
{
	struct gfs2_rbm rbm;
	struct gfs2_bitmap *bi, *bi_prev = NULL;

	rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
	if (!rbm.rgd) {
		/* No rgrp covers this block: the fs is inconsistent */
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
		return NULL;
	}

	gfs2_rbm_from_block(&rbm, bstart);
	while (blen--) {
		bi = rbm_bi(&rbm);
		/* Per-buffer work (clone setup, adding the buffer to the
		   transaction) only needs doing when we cross into a new
		   bitmap buffer, not for every block. */
		if (bi != bi_prev) {
			if (!bi->bi_clone) {
				/* Snapshot the bitmap before modifying it;
				   presumably so the pre-change state stays
				   visible until the transaction is logged
				   — TODO(review): confirm clone semantics */
				bi->bi_clone = kmalloc(bi->bi_bh->b_size,
						       GFP_NOFS | __GFP_NOFAIL);
				memcpy(bi->bi_clone + bi->bi_offset,
				       bi->bi_bh->b_data + bi->bi_offset,
				       bi->bi_len);
			}
			gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
			bi_prev = bi;
		}
		gfs2_setbit(&rbm, false, new_state);
		gfs2_rbm_incr(&rbm);
	}

	return rbm.rgd;
}
2196
2197/**
Steven Whitehouse09010972009-05-20 10:48:47 +01002198 * gfs2_rgrp_dump - print out an rgrp
2199 * @seq: The iterator
2200 * @gl: The glock in question
David Teiglandb3b94fa2006-01-16 16:50:04 +00002201 *
David Teiglandb3b94fa2006-01-16 16:50:04 +00002202 */
2203
Steven Whitehouseac3beb62014-01-16 10:31:13 +00002204void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
Steven Whitehouse09010972009-05-20 10:48:47 +01002205{
Bob Peterson8e2e0042012-07-19 08:12:40 -04002206 struct gfs2_rgrpd *rgd = gl->gl_object;
2207 struct gfs2_blkreserv *trs;
2208 const struct rb_node *n;
2209
Steven Whitehouse09010972009-05-20 10:48:47 +01002210 if (rgd == NULL)
Steven Whitehouseac3beb62014-01-16 10:31:13 +00002211 return;
Bob Peterson5ea50502013-11-25 11:16:25 +00002212 gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
Steven Whitehouse09010972009-05-20 10:48:47 +01002213 (unsigned long long)rgd->rd_addr, rgd->rd_flags,
Bob Peterson8e2e0042012-07-19 08:12:40 -04002214 rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
Bob Peterson5ea50502013-11-25 11:16:25 +00002215 rgd->rd_reserved, rgd->rd_extfail_pt);
Bob Peterson8e2e0042012-07-19 08:12:40 -04002216 spin_lock(&rgd->rd_rsspin);
2217 for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
2218 trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
2219 dump_rs(seq, trs);
2220 }
2221 spin_unlock(&rgd->rd_rsspin);
Steven Whitehouse09010972009-05-20 10:48:47 +01002222}
2223
/* Report an rgrp problem: warn, dump the rgrp state for debugging, and
 * set GFS2_RDF_ERROR so the allocator skips this rgrp from now on. */
static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
		(unsigned long long)rgd->rd_addr);
	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
	gfs2_rgrp_dump(NULL, rgd->rd_gl);
	rgd->rd_flags |= GFS2_RDF_ERROR;
}
2233
/**
 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
 * @ip: The inode we have just allocated blocks for
 * @rbm: The start of the allocated blocks
 * @len: The extent length
 *
 * Adjusts a reservation after an allocation has taken place. If the
 * reservation does not match the allocation, or if it is now empty
 * then it is removed.
 */

static void gfs2_adjust_reservation(struct gfs2_inode *ip,
				    const struct gfs2_rbm *rbm, unsigned len)
{
	struct gfs2_blkreserv *rs = &ip->i_res;
	struct gfs2_rgrpd *rgd = rbm->rgd;
	unsigned rlen;
	u64 block;
	int ret;

	spin_lock(&rgd->rd_rsspin);
	if (gfs2_rs_active(rs)) {
		/* Only shrink the reservation if the allocation started
		   exactly where the reservation points */
		if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
			block = gfs2_rbm_to_block(rbm);
			/* Advance the reservation past the allocated extent */
			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
			rlen = min(rs->rs_free, len);
			rs->rs_free -= rlen;
			rgd->rd_reserved -= rlen;
			trace_gfs2_rs(rs, TRACE_RS_CLAIM);
			/* Keep the shrunken reservation if blocks remain and
			   the advanced start position was valid */
			if (rs->rs_free && !ret)
				goto out;
			/* We used up our block reservation, so we should
			   reserve more blocks next time. */
			atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint);
		}
		/* Mismatched or exhausted: drop the reservation entirely */
		__rs_deltree(rs);
	}
out:
	spin_unlock(&rgd->rd_rsspin);
}
2274
2275/**
Steven Whitehouse9e07f2c2013-10-02 14:42:45 +01002276 * gfs2_set_alloc_start - Set starting point for block allocation
2277 * @rbm: The rbm which will be set to the required location
2278 * @ip: The gfs2 inode
2279 * @dinode: Flag to say if allocation includes a new inode
2280 *
2281 * This sets the starting point from the reservation if one is active
2282 * otherwise it falls back to guessing a start point based on the
2283 * inode's goal block or the last allocation point in the rgrp.
2284 */
2285
2286static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
2287 const struct gfs2_inode *ip, bool dinode)
2288{
2289 u64 goal;
2290
Bob Petersona097dc7e2015-07-16 08:28:04 -05002291 if (gfs2_rs_active(&ip->i_res)) {
2292 *rbm = ip->i_res.rs_rbm;
Steven Whitehouse9e07f2c2013-10-02 14:42:45 +01002293 return;
2294 }
2295
2296 if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
2297 goal = ip->i_goal;
2298 else
2299 goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
2300
2301 gfs2_rbm_from_block(rbm, goal);
2302}
2303
/**
 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
 * @ip: the inode to allocate the block for
 * @bn: Used to return the starting block number
 * @nblocks: requested number of blocks/extent length (value/result)
 * @dinode: 1 if we're allocating a dinode block, else 0
 * @generation: the generation number of the inode
 *
 * Returns: 0 or error (-EIO after marking the rgrp in error)
 */

int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
		      bool dinode, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
	unsigned int ndata;
	u64 block; /* block, within the file system scope */
	int error;

	gfs2_set_alloc_start(&rbm, ip, dinode);
	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false);

	/* First search passed @ip (restricting it to the reservation);
	   retry without it before giving up */
	if (error == -ENOSPC) {
		gfs2_set_alloc_start(&rbm, ip, dinode);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false);
	}

	/* Since all blocks are reserved in advance, this shouldn't happen */
	if (error) {
		fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
			(unsigned long long)ip->i_no_addr, error, *nblocks,
			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
			rbm.rgd->rd_extfail_pt);
		goto rgrp_error;
	}

	/* Mark the extent allocated; *nblocks becomes the actual length */
	gfs2_alloc_extent(&rbm, dinode, nblocks);
	block = gfs2_rbm_to_block(&rbm);
	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
	if (gfs2_rs_active(&ip->i_res))
		gfs2_adjust_reservation(ip, &rbm, *nblocks);
	ndata = *nblocks;
	if (dinode)
		ndata--;

	if (!dinode) {
		/* Record the new goal block in memory and on disk */
		ip->i_goal = block + ndata - 1;
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error == 0) {
			struct gfs2_dinode *di =
				(struct gfs2_dinode *)dibh->b_data;
			gfs2_trans_add_meta(ip->i_gl, dibh);
			di->di_goal_meta = di->di_goal_data =
				cpu_to_be64(ip->i_goal);
			brelse(dibh);
		}
	}
	/* Sanity check before decrementing the free-block count */
	if (rbm.rgd->rd_free < *nblocks) {
		pr_warn("nblocks=%u\n", *nblocks);
		goto rgrp_error;
	}

	rbm.rgd->rd_free -= *nblocks;
	if (dinode) {
		rbm.rgd->rd_dinodes++;
		*generation = rbm.rgd->rd_igeneration++;
		/* A generation of zero is never handed out; skip it */
		if (*generation == 0)
			*generation = rbm.rgd->rd_igeneration++;
	}

	/* Write the updated rgrp header back and refresh the LVB copy */
	gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
	if (dinode)
		gfs2_trans_add_unrevoke(sdp, block, *nblocks);

	gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);

	rbm.rgd->rd_free_clone -= *nblocks;
	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	*bn = block;
	return 0;

rgrp_error:
	gfs2_rgrp_error(rbm.rgd);
	return -EIO;
}
2396
/**
 * __gfs2_free_blocks - free a contiguous run of block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 * @meta: 1 if the blocks represent metadata
 *
 * Marks the blocks free in the bitmap, updates the rgrp counters and
 * writes the rgrp header back. Does not touch statfs/quota accounting;
 * callers (e.g. gfs2_free_meta) handle that.
 */

void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
	rgd->rd_free += blen;
	/* The rgrp has freed blocks again, so it is no longer fully trimmed */
	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);

	/* Directories keep their data in the metadata address space */
	if (meta || ip->i_depth)
		gfs2_meta_wipe(ip, bstart, blen);
}
David Teiglandb3b94fa2006-01-16 16:50:04 +00002425
/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 * Frees the blocks (with meta == 1, so the buffers are also wiped from
 * the metadata address space) and credits them back to the statfs and
 * quota accounting.
 */

void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, bstart, blen, 1);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}
2442
/**
 * gfs2_unlink_di - Mark an inode's dinode block as unlinked
 * @inode: The inode being unlinked
 *
 * Sets the dinode's block to GFS2_BLKST_UNLINKED (not free), updates the
 * rgrp header and its LVB copy, and bumps the LVB unlinked count.
 */
void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;

	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	/* Write the updated rgrp header back and refresh the LVB copy */
	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, 1);
}
2459
/**
 * gfs2_free_di - free a dinode block
 * @rgd: the resource group that contains the dinode
 * @ip: the inode whose dinode block (ip->i_no_addr) is freed
 *
 * Marks the dinode block free, updates the rgrp dinode/free counters,
 * the on-disk rgrp header and LVB, and the statfs/quota accounting.
 */
void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	/* The caller's rgd must be the rgrp the block actually maps to */
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_dinodes--;
	rgd->rd_free++;

	/* Write the updated rgrp header back and refresh the LVB copy */
	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, -1);

	/* One fewer dinode, one more free block */
	gfs2_statfs_change(sdp, 0, +1, -1);
	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}
2485
2486/**
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002487 * gfs2_check_blk_type - Check the type of a block
2488 * @sdp: The superblock
2489 * @no_addr: The block number to check
2490 * @type: The block type we are looking for
2491 *
2492 * Returns: 0 if the block type matches the expected type
2493 * -ESTALE if it doesn't match
2494 * or -ve errno if something went wrong while checking
2495 */
2496
2497int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
2498{
2499 struct gfs2_rgrpd *rgd;
Steven Whitehouse8339ee52011-08-31 16:38:29 +01002500 struct gfs2_holder rgd_gh;
Bob Peterson58884c42012-03-05 10:19:35 -05002501 int error = -EINVAL;
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002502
Steven Whitehouse66fc0612012-02-08 12:58:32 +00002503 rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002504 if (!rgd)
Steven Whitehouse8339ee52011-08-31 16:38:29 +01002505 goto fail;
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002506
2507 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
2508 if (error)
Steven Whitehouse8339ee52011-08-31 16:38:29 +01002509 goto fail;
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002510
2511 if (gfs2_get_block_type(rgd, no_addr) != type)
2512 error = -ESTALE;
2513
2514 gfs2_glock_dq_uninit(&rgd_gh);
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002515fail:
2516 return error;
2517}
2518
2519/**
David Teiglandb3b94fa2006-01-16 16:50:04 +00002520 * gfs2_rlist_add - add a RG to a list of RGs
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002521 * @ip: the inode
David Teiglandb3b94fa2006-01-16 16:50:04 +00002522 * @rlist: the list of resource groups
2523 * @block: the block
2524 *
2525 * Figure out what RG a block belongs to and add that RG to the list
2526 *
2527 * FIXME: Don't use NOFAIL
2528 *
2529 */
2530
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002531void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
Steven Whitehousecd915492006-09-04 12:49:07 -04002532 u64 block)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002533{
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002534 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002535 struct gfs2_rgrpd *rgd;
2536 struct gfs2_rgrpd **tmp;
2537 unsigned int new_space;
2538 unsigned int x;
2539
2540 if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
2541 return;
2542
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002543 if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
2544 rgd = ip->i_rgd;
2545 else
Steven Whitehouse66fc0612012-02-08 12:58:32 +00002546 rgd = gfs2_blk2rgrpd(sdp, block, 1);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002547 if (!rgd) {
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002548 fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002549 return;
2550 }
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002551 ip->i_rgd = rgd;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002552
2553 for (x = 0; x < rlist->rl_rgrps; x++)
2554 if (rlist->rl_rgd[x] == rgd)
2555 return;
2556
2557 if (rlist->rl_rgrps == rlist->rl_space) {
2558 new_space = rlist->rl_space + 10;
2559
2560 tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
Steven Whitehousedd894be2006-07-27 14:29:00 -04002561 GFP_NOFS | __GFP_NOFAIL);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002562
2563 if (rlist->rl_rgd) {
2564 memcpy(tmp, rlist->rl_rgd,
2565 rlist->rl_space * sizeof(struct gfs2_rgrpd *));
2566 kfree(rlist->rl_rgd);
2567 }
2568
2569 rlist->rl_space = new_space;
2570 rlist->rl_rgd = tmp;
2571 }
2572
2573 rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
2574}
2575
2576/**
2577 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
2578 * and initialize an array of glock holders for them
2579 * @rlist: the list of resource groups
2580 * @state: the lock state to acquire the RG lock in
David Teiglandb3b94fa2006-01-16 16:50:04 +00002581 *
2582 * FIXME: Don't use NOFAIL
2583 *
2584 */
2585
Bob Petersonfe6c9912008-01-28 11:13:02 -06002586void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002587{
2588 unsigned int x;
2589
Andreas Gruenbacher6df9f9a2016-06-17 07:31:27 -05002590 rlist->rl_ghs = kmalloc(rlist->rl_rgrps * sizeof(struct gfs2_holder),
Steven Whitehousedd894be2006-07-27 14:29:00 -04002591 GFP_NOFS | __GFP_NOFAIL);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002592 for (x = 0; x < rlist->rl_rgrps; x++)
2593 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
Bob Petersonfe6c9912008-01-28 11:13:02 -06002594 state, 0,
David Teiglandb3b94fa2006-01-16 16:50:04 +00002595 &rlist->rl_ghs[x]);
2596}
2597
2598/**
2599 * gfs2_rlist_free - free a resource group list
Fabian Frederick27ff6a02014-07-02 22:05:27 +02002600 * @rlist: the list of resource groups
David Teiglandb3b94fa2006-01-16 16:50:04 +00002601 *
2602 */
2603
2604void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
2605{
2606 unsigned int x;
2607
2608 kfree(rlist->rl_rgd);
2609
2610 if (rlist->rl_ghs) {
2611 for (x = 0; x < rlist->rl_rgrps; x++)
2612 gfs2_holder_uninit(&rlist->rl_ghs[x]);
2613 kfree(rlist->rl_ghs);
Bob Peterson8e2e0042012-07-19 08:12:40 -04002614 rlist->rl_ghs = NULL;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002615 }
2616}
2617